Dataset columns:

    code                      string   lengths 81 to 54k
    code_codestyle            int64    0 to 721
    style_context             string   lengths 91 to 41.9k
    style_context_codestyle   int64    0 to 699
    label                     int64    0 to 1

Row 1
code:
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = field( metadata={'help': 'The output directory where the model will be written.'} ,) _UpperCamelCase = field( metadata={ 'help': ( 'The encoder model checkpoint for weights initialization.' 'Don\'t set if you want to train an encoder model from scratch.' ) } ,) _UpperCamelCase = field( metadata={ 'help': ( 'The decoder model checkpoint for weights initialization.' 'Don\'t set if you want to train a decoder model from scratch.' ) } ,) _UpperCamelCase = field( default=a ,metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} ) def A__ ( ): lowerCamelCase__ = HfArgumentParser((ModelArguments,) ) ((lowerCamelCase__) , ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowerCamelCase__ = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowerCamelCase__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowerCamelCase__ = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowerCamelCase__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__lowerCAmelCase , decoder_config=__lowerCAmelCase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowerCamelCase__ = decoder_config.decoder_start_token_id lowerCamelCase__ = decoder_config.pad_token_id if decoder_start_token_id is None: lowerCamelCase__ = decoder_config.bos_token_id if pad_token_id is None: lowerCamelCase__ = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowerCamelCase__ = decoder_config.eos_token_id lowerCamelCase__ = decoder_start_token_id lowerCamelCase__ = pad_token_id lowerCamelCase__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowerCamelCase__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
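As an aside, the key step in the sample above is the token-id fix-up: GPT-2 defines only bos/eos tokens, so decoder_start and pad ids must be filled in before Flax generation works. A minimal de-obfuscated sketch of that logic (the helper name is ours, not the sample's):

    # Hedged sketch: derive decoder_start/pad ids when the decoder config
    # (e.g. GPT-2) only defines bos/eos, mirroring the fix-up in the sample.
    def fill_special_token_ids(decoder_config):
        decoder_start = decoder_config.decoder_start_token_id
        pad = decoder_config.pad_token_id
        if decoder_start is None:
            decoder_start = decoder_config.bos_token_id  # fall back to bos
        if pad is None:
            pad = decoder_config.eos_token_id  # fall back to eos
        return decoder_start, pad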
code_codestyle: 9
style_context:
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
style_context_codestyle: 9
label: 1

Row 2
code:
'''simple docstring''' UpperCamelCase : Dict = [ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
code_codestyle: 9
style_context:
'''simple docstring''' UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution UpperCamelCase : list[bool | None] = [None] * 10_00_00_00 UpperCamelCase : Tuple = True UpperCamelCase : Optional[int] = False def A__ ( __lowerCAmelCase : int ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) ) lowerCamelCase__ = number_chain while number < 1000_0000: lowerCamelCase__ = number_chain number *= 10 return number_chain def A__ ( __lowerCAmelCase : int = 1000_0000 ): for i in range(1 , __lowerCAmelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution() = }')
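The style_context above is Project Euler 92 (square-digit chains). Stripped of the five-digit lookup-table optimisation, the underlying algorithm is just:

    def next_number(n: int) -> int:
        """Sum of the squares of the decimal digits of n."""
        total = 0
        while n:
            n, d = divmod(n, 10)
            total += d * d
        return total

    def ends_in_89(n: int) -> bool:
        """Every chain eventually reaches 1 or 89; follow it until one appears."""
        while n not in (1, 89):
            n = next_number(n)
        return n == 89

    # Naive but correct count of starts below ten million whose chain hits 89.
    print(sum(ends_in_89(i) for i in range(1, 10_000_000)))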
style_context_codestyle: 9
label: 1

Row 3
code:
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : '''simple docstring''' @staticmethod def UpperCamelCase_ ( *_lowerCAmelCase ,**_lowerCAmelCase ): pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' _UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = pipeline( """zero-shot-object-detection""" ,model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) lowerCamelCase__ = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = object_detector(examples[0] ,threshold=0.0 ) lowerCamelCase__ = len(_lowerCAmelCase ) self.assertGreater(_lowerCAmelCase ,0 ) self.assertEqual( _lowerCAmelCase ,[ { """score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase ), """box""": {"""xmin""": ANY(_lowerCAmelCase ), """ymin""": ANY(_lowerCAmelCase ), """xmax""": ANY(_lowerCAmelCase ), """ymax""": ANY(_lowerCAmelCase )}, } for i in range(_lowerCAmelCase ) ] ,) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def UpperCamelCase_ ( self ): pass @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = pipeline( """zero-shot-object-detection""" ,model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) lowerCamelCase__ = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,threshold=0.64 ,) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, ] ,) lowerCamelCase__ = object_detector( [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] ,threshold=0.64 ,) self.assertEqual( 
nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[ [ {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}}, {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, ] ] ,) @require_torch @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = pipeline("""zero-shot-object-detection""" ) lowerCamelCase__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ] ,) lowerCamelCase__ = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] ,) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[ [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ], [ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, 
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ], ] ,) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def UpperCamelCase_ ( self ): pass @require_torch @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = 0.2 lowerCamelCase__ = pipeline("""zero-shot-object-detection""" ) lowerCamelCase__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,threshold=_lowerCAmelCase ,) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, ] ,) @require_torch @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = 2 lowerCamelCase__ = pipeline("""zero-shot-object-detection""" ) lowerCamelCase__ = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" ,candidate_labels=["""cat""", """remote""", """couch"""] ,top_k=_lowerCAmelCase ,) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[ {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, ] ,)
code_codestyle: 9
style_context:
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : List[str] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'donut-swin' _UpperCamelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = depths lowerCamelCase__ = len(_lowerCAmelCase ) lowerCamelCase__ = num_heads lowerCamelCase__ = window_size lowerCamelCase__ = mlp_ratio lowerCamelCase__ = qkv_bias lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = hidden_act lowerCamelCase__ = use_absolute_embeddings lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
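With the defaults in this config (embed_dim=96, depths=[2, 2, 6, 2]), the derived channel width after the last stage works out to:

    hidden_size = embed_dim * 2 ** (len(depths) - 1) = 96 * 2**3 = 768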
style_context_codestyle: 9
label: 1

Row 4
code:
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all BART models at https://huggingface.co/models?filter=bart UpperCamelCase : int = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, } UpperCamelCase : Optional[Any] = { 'facebook/bart-base': 10_24, 'facebook/bart-large': 10_24, 'facebook/bart-large-mnli': 10_24, 'facebook/bart-large-cnn': 10_24, 'facebook/bart-large-xsum': 10_24, 'yjernite/bart_eli5': 10_24, } @lru_cache() def A__ ( ): lowerCamelCase__ = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) lowerCamelCase__ = bs[:] lowerCamelCase__ = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowerCAmelCase ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ = [chr(__lowerCAmelCase ) for n in cs] return dict(zip(__lowerCAmelCase , __lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : str ): lowerCamelCase__ = set() lowerCamelCase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ = char return pairs class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase="replace" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else bos_token lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else eos_token lowerCamelCase__ = 
AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else sep_token lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else cls_token lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else unk_token lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token super().__init__( errors=_lowerCAmelCase ,bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ,**_lowerCAmelCase ,) with open(_lowerCAmelCase ,encoding="""utf-8""" ) as vocab_handle: lowerCamelCase__ = json.load(_lowerCAmelCase ) lowerCamelCase__ = {v: k for k, v in self.encoder.items()} lowerCamelCase__ = errors # how to handle errors in decoding lowerCamelCase__ = bytes_to_unicode() lowerCamelCase__ = {v: k for k, v in self.byte_encoder.items()} with open(_lowerCAmelCase ,encoding="""utf-8""" ) as merges_handle: lowerCamelCase__ = merges_handle.read().split("""\n""" )[1:-1] lowerCamelCase__ = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ = dict(zip(_lowerCAmelCase ,range(len(_lowerCAmelCase ) ) ) ) lowerCamelCase__ = {} lowerCamelCase__ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def UpperCamelCase_ ( self ): return len(self.encoder ) def UpperCamelCase_ ( self ): return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.cache: return self.cache[token] lowerCamelCase__ = tuple(_lowerCAmelCase ) lowerCamelCase__ = get_pairs(_lowerCAmelCase ) if not pairs: return token while True: lowerCamelCase__ = min(_lowerCAmelCase ,key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ = bigram lowerCamelCase__ = [] lowerCamelCase__ = 0 while i < len(_lowerCAmelCase ): try: lowerCamelCase__ = word.index(_lowerCAmelCase ,_lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ = j if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ = tuple(_lowerCAmelCase ) lowerCamelCase__ = new_word if len(_lowerCAmelCase ) == 1: break else: lowerCamelCase__ = get_pairs(_lowerCAmelCase ) lowerCamelCase__ = """ """.join(_lowerCAmelCase ) lowerCamelCase__ = word return word def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] for token in re.findall(self.pat ,_lowerCAmelCase ): lowerCamelCase__ = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens 
of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(""" """ ) ) return bpe_tokens def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.encoder.get(_lowerCAmelCase ,self.encoder.get(self.unk_token ) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.decoder.get(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """""".join(_lowerCAmelCase ) lowerCamelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_lowerCAmelCase ,ensure_ascii=_lowerCAmelCase ) + """\n""" ) lowerCamelCase__ = 0 with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) lowerCamelCase__ = token_index writer.write(""" """.join(_lowerCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=False ,**_lowerCAmelCase ): lowerCamelCase__ = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ = """ """ + text return (text, kwargs)
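The bpe() method in this tokenizer is the standard greedy merge loop. A compact sketch of the core idea, with toy merge ranks standing in for the real merges.txt:

    def bpe_merge(word: tuple, ranks: dict) -> tuple:
        """Repeatedly merge the adjacent pair with the lowest merge rank."""
        while len(word) > 1:
            pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
            best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
            if best not in ranks:
                break  # no known merge applies
            first, second = best
            merged, i = [], 0
            while i < len(word):
                if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                    merged.append(first + second)  # apply the merge
                    i += 2
                else:
                    merged.append(word[i])
                    i += 1
            word = tuple(merged)
        return word

    # toy ranks: merge "l"+"o" first, then "lo"+"w"
    print(bpe_merge(tuple("low"), {("l", "o"): 0, ("lo", "w"): 1}))  # ('low',)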
code_codestyle: 9
style_context:
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCamelCase : Optional[Any] = ['small', 'medium', 'large'] UpperCamelCase : Dict = 'lm_head.decoder.weight' UpperCamelCase : int = 'lm_head.weight' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = torch.load(__lowerCAmelCase ) lowerCamelCase__ = d.pop(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCamelCase : Dict = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl') UpperCamelCase : str = F'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
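The conversion in this style_context is a single key rename inside the state dict. The same logic with readable names:

    import os
    import torch

    # DialoGPT checkpoints store the LM head as "lm_head.decoder.weight";
    # transformers expects "lm_head.weight", so pop and re-insert the tensor.
    def convert_dialogpt_checkpoint(checkpoint_path: str, dump_folder: str) -> None:
        state_dict = torch.load(checkpoint_path)
        state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
        os.makedirs(dump_folder, exist_ok=True)
        torch.save(state_dict, os.path.join(dump_folder, "pytorch_model.bin"))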
style_context_codestyle: 9
label: 1

Row 5
code:
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = 42 class UpperCamelCase__ (nn.Module ): '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = (16, 32, 96, 256) _UpperCamelCase = jnp.floataa def UpperCamelCase_ ( self ): lowerCamelCase__ = nn.Conv( self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) lowerCamelCase__ = [] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase__ = self.block_out_channels[i] lowerCamelCase__ = self.block_out_channels[i + 1] lowerCamelCase__ = nn.Conv( _lowerCAmelCase ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(_lowerCAmelCase ) lowerCamelCase__ = nn.Conv( _lowerCAmelCase ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) blocks.append(_lowerCAmelCase ) lowerCamelCase__ = blocks lowerCamelCase__ = nn.Conv( self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self ,_lowerCAmelCase ): lowerCamelCase__ = self.conv_in(_lowerCAmelCase ) lowerCamelCase__ = nn.silu(_lowerCAmelCase ) for block in self.blocks: lowerCamelCase__ = block(_lowerCAmelCase ) lowerCamelCase__ = nn.silu(_lowerCAmelCase ) lowerCamelCase__ = self.conv_out(_lowerCAmelCase ) return embedding @flax_register_to_config class UpperCamelCase__ (nn.Module ,a ,a ): '''simple docstring''' _UpperCamelCase = 32 _UpperCamelCase = 4 _UpperCamelCase = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _UpperCamelCase = False _UpperCamelCase = (320, 640, 1280, 1280) _UpperCamelCase = 2 _UpperCamelCase = 8 _UpperCamelCase = None _UpperCamelCase = 1280 _UpperCamelCase = 0.0 _UpperCamelCase = False _UpperCamelCase = jnp.floataa _UpperCamelCase = True _UpperCamelCase = 0 _UpperCamelCase = "rgb" _UpperCamelCase = (16, 32, 96, 256) def UpperCamelCase_ ( self ,_lowerCAmelCase ): # init input tensors lowerCamelCase__ = (1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase__ = jnp.zeros(_lowerCAmelCase ,dtype=jnp.floataa ) lowerCamelCase__ = jnp.ones((1,) ,dtype=jnp.intaa ) lowerCamelCase__ = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) lowerCamelCase__ = (1, 3, self.sample_size * 8, self.sample_size * 8) lowerCamelCase__ = jnp.zeros(_lowerCAmelCase ,dtype=jnp.floataa ) lowerCamelCase__ , lowerCamelCase__ = jax.random.split(_lowerCAmelCase ) lowerCamelCase__ = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )["params"] def UpperCamelCase_ ( self ): lowerCamelCase__ = self.block_out_channels lowerCamelCase__ = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. 
# The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCamelCase__ = self.num_attention_heads or self.attention_head_dim # input lowerCamelCase__ = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time lowerCamelCase__ = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) lowerCamelCase__ = FlaxTimestepEmbedding(_lowerCAmelCase ,dtype=self.dtype ) lowerCamelCase__ = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,) lowerCamelCase__ = self.only_cross_attention if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = (num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = block_out_channels[0] lowerCamelCase__ = nn.Conv( _lowerCAmelCase ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(_lowerCAmelCase ) for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase__ = output_channel lowerCamelCase__ = block_out_channels[i] lowerCamelCase__ = i == len(_lowerCAmelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase__ = FlaxCrossAttnDownBlockaD( in_channels=_lowerCAmelCase ,out_channels=_lowerCAmelCase ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,) else: lowerCamelCase__ = FlaxDownBlockaD( in_channels=_lowerCAmelCase ,out_channels=_lowerCAmelCase ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(_lowerCAmelCase ) for _ in range(self.layers_per_block ): lowerCamelCase__ = nn.Conv( _lowerCAmelCase ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(_lowerCAmelCase ) if not is_final_block: lowerCamelCase__ = nn.Conv( _lowerCAmelCase ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) controlnet_down_blocks.append(_lowerCAmelCase ) lowerCamelCase__ = down_blocks lowerCamelCase__ = controlnet_down_blocks # mid lowerCamelCase__ = block_out_channels[-1] lowerCamelCase__ = FlaxUNetMidBlockaDCrossAttn( in_channels=_lowerCAmelCase ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,) lowerCamelCase__ = nn.Conv( _lowerCAmelCase ,kernel_size=(1, 1) ,padding="""VALID""" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,) def __call__( self 
,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 1.0 ,_lowerCAmelCase = True ,_lowerCAmelCase = False ,): lowerCamelCase__ = self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCamelCase__ = jnp.flip(_lowerCAmelCase ,axis=1 ) # 1. time if not isinstance(_lowerCAmelCase ,jnp.ndarray ): lowerCamelCase__ = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(_lowerCAmelCase ,jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase__ = timesteps.astype(dtype=jnp.floataa ) lowerCamelCase__ = jnp.expand_dims(_lowerCAmelCase ,0 ) lowerCamelCase__ = self.time_proj(_lowerCAmelCase ) lowerCamelCase__ = self.time_embedding(_lowerCAmelCase ) # 2. pre-process lowerCamelCase__ = jnp.transpose(_lowerCAmelCase ,(0, 2, 3, 1) ) lowerCamelCase__ = self.conv_in(_lowerCAmelCase ) lowerCamelCase__ = jnp.transpose(_lowerCAmelCase ,(0, 2, 3, 1) ) lowerCamelCase__ = self.controlnet_cond_embedding(_lowerCAmelCase ) sample += controlnet_cond # 3. down lowerCamelCase__ = (sample,) for down_block in self.down_blocks: if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ = down_block(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,deterministic=not train ) else: lowerCamelCase__ , lowerCamelCase__ = down_block(_lowerCAmelCase ,_lowerCAmelCase ,deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCamelCase__ = self.mid_block(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,deterministic=not train ) # 5. contronet blocks lowerCamelCase__ = () for down_block_res_sample, controlnet_block in zip(_lowerCAmelCase ,self.controlnet_down_blocks ): lowerCamelCase__ = controlnet_block(_lowerCAmelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCamelCase__ = controlnet_down_block_res_samples lowerCamelCase__ = self.controlnet_mid_block(_lowerCAmelCase ) # 6. scaling lowerCamelCase__ = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=_lowerCAmelCase ,mid_block_res_sample=_lowerCAmelCase )
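Note the repeated zero-initialized 1x1 convolutions in the sample above: this is ControlNet's "zero convolution" trick, so the control branch contributes nothing at initialization. In isolation (the feature count is illustrative):

    import flax.linen as nn

    # A 1x1 conv whose kernel and bias start at zero; its output is all zeros
    # until training moves the weights, leaving the base UNet untouched at init.
    zero_conv = nn.Conv(
        features=320, kernel_size=(1, 1), padding="VALID",
        kernel_init=nn.initializers.zeros_init(),
        bias_init=nn.initializers.zeros_init(),
    )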
code_codestyle: 9
style_context:
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
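The sequence-length formula in this tester is worth unpacking. With its defaults (image_size=30, patch_size=2, mask_ratio=0.6):

    num_patches = (30 // 2) ** 2 = 225
    seq_length  = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91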
style_context_codestyle: 9
label: 1

Row 6
code:
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : str = logging.get_logger(__name__) UpperCamelCase : int = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'vit_msn' def __init__( self ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-06 ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=16 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = initializer_range lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = qkv_bias
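A config class like this one is normally instantiated directly, overriding fields as needed; a brief hedged usage sketch:

    from transformers import ViTMSNConfig, ViTMSNModel

    config = ViTMSNConfig(image_size=224, patch_size=16, hidden_size=768)
    model = ViTMSNModel(config)      # randomly initialized from the config
    print(config.num_hidden_layers)  # 12, the default above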
code_codestyle: 9
style_context:
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
1
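A quick arithmetic check of the vit_msn defaults in the config row above: image_size=224 with patch_size=16 gives a 14 x 14 grid of patches. A throwaway sketch (variable names are mine, and the "+ 1" assumes the standard ViT class token, which the sample itself does not state):

image_size, patch_size = 224, 16  # defaults from the config above
num_patches = (image_size // patch_size) ** 2
print(num_patches, num_patches + 1)  # 196 patches, 197 tokens with [CLS]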
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


UpperCamelCase : List[str] = logging.get_logger(__name__)


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
        warnings.warn(
            """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DeiTImageProcessor instead.""" ,_lowerCAmelCase ,)
        super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
9
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
9
1
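The gradient-descent sample above hard-codes four training tuples, and those four points admit an exact linear fit, which is handy for judging whether the loop has converged. A small NumPy cross-check (solving the square system directly is my addition, not part of the sample):

import numpy as np

# Training points from the sample: ((5, 2, 3), 15), ((6, 5, 9), 25),
# ((11, 12, 13), 41), ((1, 1, 1), 8); a bias column is prepended.
X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1]], dtype=float)
y = np.array([15.0, 25.0, 41.0, 8.0])
Xb = np.hstack([np.ones((4, 1)), X])
theta = np.linalg.solve(Xb, y)
print(theta)  # [5. 1. 1. 1.] -> hypothesis h(x) = 5 + x1 + x2 + x3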
'''simple docstring'''
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ):
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        lowerCamelCase__ = _modexpt(__lowerCAmelCase , exponent // 2 , __lowerCAmelCase ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(__lowerCAmelCase , exponent - 1 , __lowerCAmelCase )) % modulo_value


def A__ ( __lowerCAmelCase : int = 1777 , __lowerCAmelCase : int = 1855 , __lowerCAmelCase : int = 8 ):
    lowerCamelCase__ = base
    for _ in range(1 , __lowerCAmelCase ):
        lowerCamelCase__ = _modexpt(__lowerCAmelCase , __lowerCAmelCase , 10**digits )
    return result


if __name__ == "__main__":
    print(F'{solution() = }')
9
'''simple docstring'''
import argparse

from omegaconf import OmegaConf
import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ):
    lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase )
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""]
    lowerCamelCase__ = list(state_dict.keys() )
    # extract state_dict for VQVAE
    lowerCamelCase__ = {}
    lowerCamelCase__ = """first_stage_model."""
    for key in keys:
        if key.startswith(__lowerCAmelCase ):
            lowerCamelCase__ = state_dict[key]
    # extract state_dict for UNetLDM
    lowerCamelCase__ = {}
    lowerCamelCase__ = """model.diffusion_model."""
    for key in keys:
        if key.startswith(__lowerCAmelCase ):
            lowerCamelCase__ = state_dict[key]
    lowerCamelCase__ = config.model.params.first_stage_config.params
    lowerCamelCase__ = config.model.params.unet_config.params
    lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval()
    vqvae.load_state_dict(__lowerCAmelCase )
    lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval()
    unet.load_state_dict(__lowerCAmelCase )
    lowerCamelCase__ = DDIMScheduler(
        timesteps=config.model.params.timesteps ,
        beta_schedule="""scaled_linear""" ,
        beta_start=config.model.params.linear_start ,
        beta_end=config.model.params.linear_end ,
        clip_sample=__lowerCAmelCase ,
    )
    lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    pipeline.save_pretrained(__lowerCAmelCase )


if __name__ == "__main__":
    UpperCamelCase : Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    UpperCamelCase : List[Any] = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
9
1
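De-obfuscated, the modular tetration routine above is square-and-multiply exponentiation applied one tower level at a time. A readable sketch (the names are mine; the logic mirrors the sample):

def mod_pow(base: int, exponent: int, modulo: int) -> int:
    # Recursive square-and-multiply, reducing modulo `modulo` at each step.
    if exponent == 1:
        return base % modulo
    if exponent % 2 == 0:
        half = mod_pow(base, exponent // 2, modulo)
        return (half * half) % modulo
    return (base * mod_pow(base, exponent - 1, modulo)) % modulo


def tetration_tail(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # Last `digits` digits of base^^height, folding the tower bottom-up.
    result = base
    for _ in range(1, height):
        result = mod_pow(base, result, 10 ** digits)
    return result


print(tetration_tail())  # same answer as the sample's solution()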
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : Tuple = { 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
9
1
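One property the brute-force decryption above depends on: repeating-key XOR is an involution, so applying the same key twice restores the plaintext. A tiny round trip (message and key invented for illustration; the puzzle's key is likewise three lowercase letters):

from itertools import cycle

message = "the quick brown fox"
key = tuple(ord(c) for c in "god")
cipher = [ord(m) ^ k for m, k in zip(message, cycle(key))]
plain = "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key)))
assert plain == message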
'''simple docstring'''
def A__ ( __lowerCAmelCase : int ):
    lowerCamelCase__ = [[0 for _ in range(__lowerCAmelCase )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        lowerCamelCase__ = 1
    for n in range(m + 1 ):
        for k in range(1 , __lowerCAmelCase ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            UpperCamelCase : Optional[int] = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            UpperCamelCase : int = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
9
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
9
1
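The memo table in the partition sample above returns memo[m][m - 1], i.e. the number of partitions of m into at least two parts. A slow brute-force cross-check for small inputs (the helper is mine, not from the sample):

def partitions_at_least_two(m: int) -> int:
    # Count partitions of m with largest part at most m - 1, which
    # excludes exactly the one-part partition "m" itself.
    def count(n: int, max_part: int) -> int:
        if n == 0:
            return 1
        return sum(count(n - k, k) for k in range(1, min(n, max_part) + 1))

    return count(m, m - 1)


print([partitions_at_least_two(m) for m in range(2, 8)])  # [1, 2, 4, 6, 10, 14]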
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def UpperCamelCase_ ( self ): lowerCamelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) lowerCamelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) lowerCamelCase__ = """xvjiarui/stable-diffusion-2-inpainting""" lowerCamelCase__ , lowerCamelCase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(_lowerCAmelCase ,safety_checker=_lowerCAmelCase ) lowerCamelCase__ = """Face of a yellow cat, high resolution, sitting on a park bench""" lowerCamelCase__ = jax.random.PRNGKey(0 ) lowerCamelCase__ = 50 lowerCamelCase__ = jax.device_count() lowerCamelCase__ = num_samples * [prompt] lowerCamelCase__ = num_samples * [init_image] lowerCamelCase__ = num_samples * [mask_image] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = pipeline.prepare_inputs(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # shard inputs and rng lowerCamelCase__ = replicate(_lowerCAmelCase ) lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,jax.device_count() ) lowerCamelCase__ = shard(_lowerCAmelCase ) lowerCamelCase__ = shard(_lowerCAmelCase ) lowerCamelCase__ = shard(_lowerCAmelCase ) lowerCamelCase__ = pipeline( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,jit=_lowerCAmelCase ) lowerCamelCase__ = output.images.reshape(_lowerCAmelCase ,5_12 ,5_12 ,3 ) lowerCamelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1] lowerCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCamelCase__ = jnp.array( [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
9
'''simple docstring'''
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def A__ ( __lowerCAmelCase : Union[str, Any] ):
    lowerCamelCase__ = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : Tuple ):
    lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape
    lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
    lowerCamelCase__ = emb.weight.data
    return lin_layer


def A__ ( __lowerCAmelCase : Dict ):
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
    lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    lowerCamelCase__ = mam_aaa["""model"""]
    remove_ignore_keys_(__lowerCAmelCase )
    lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    lowerCamelCase__ = MaMaaaConfig(
        vocab_size=__lowerCAmelCase ,
        max_position_embeddings=1024 ,
        encoder_layers=args.encoder_layers ,
        decoder_layers=args.decoder_layers ,
        encoder_attention_heads=args.encoder_attention_heads ,
        decoder_attention_heads=args.decoder_attention_heads ,
        encoder_ffn_dim=args.encoder_ffn_embed_dim ,
        decoder_ffn_dim=args.decoder_ffn_embed_dim ,
        d_model=args.encoder_embed_dim ,
        encoder_layerdrop=args.encoder_layerdrop ,
        decoder_layerdrop=args.decoder_layerdrop ,
        dropout=args.dropout ,
        attention_dropout=args.attention_dropout ,
        activation_dropout=args.activation_dropout ,
        activation_function="""relu""" ,
    )
    lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""]
    lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
    model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
    lowerCamelCase__ = make_linear_from_emb(model.model.shared )
    return model


if __name__ == "__main__":
    UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    UpperCamelCase : Tuple = parser.parse_args()
    UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
9
1
'''simple docstring''' UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution UpperCamelCase : list[bool | None] = [None] * 10_00_00_00 UpperCamelCase : Tuple = True UpperCamelCase : Optional[int] = False def A__ ( __lowerCAmelCase : int ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) ) lowerCamelCase__ = number_chain while number < 1000_0000: lowerCamelCase__ = number_chain number *= 10 return number_chain def A__ ( __lowerCAmelCase : int = 1000_0000 ): for i in range(1 , __lowerCAmelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution() = }')
9
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
9
1
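Behind the precomputed DIGITS_SQUARED table in the square-digit-chain sample above is the map n -> sum of squared digits, under which every starting value eventually reaches 1 or enters the 89 loop. Tracing one chain (the helper name is mine):

def next_number(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))


n, seen = 44, [44]
while n not in (1, 89):
    n = next_number(n)
    seen.append(n)
print(" -> ".join(map(str, seen)))  # 44 -> 32 -> 13 -> 10 -> 1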
'''simple docstring''' import argparse from collections import defaultdict def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = F'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(__lowerCAmelCase , """r""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = F'''class {class_name}(''' lowerCamelCase__ = F'''{4 * " "}def {test_name}(''' lowerCamelCase__ = F'''{8 * " "}{correct_line.split()[0]}''' lowerCamelCase__ = F'''{16 * " "}{correct_line.split()[0]}''' lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = [] for line in lines: if line.startswith(__lowerCAmelCase ): lowerCamelCase__ = True elif in_class and line.startswith(__lowerCAmelCase ): lowerCamelCase__ = True elif in_class and in_func and (line.startswith(__lowerCAmelCase ) or line.startswith(__lowerCAmelCase )): lowerCamelCase__ = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: lowerCamelCase__ = True if in_class and in_func and in_line: if ")" not in line: continue else: lowerCamelCase__ = True if in_class and in_func and in_line and insert_line: new_lines.append(F'''{spaces * " "}{correct_line}''' ) lowerCamelCase__ = lowerCamelCase__ = lowerCamelCase__ = lowerCamelCase__ = False else: new_lines.append(__lowerCAmelCase ) with open(__lowerCAmelCase , """w""" ) as f: for line in new_lines: f.write(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : List[str]=None ): if fail is not None: with open(__lowerCAmelCase , """r""" ) as f: lowerCamelCase__ = {l.strip() for l in f.readlines()} else: lowerCamelCase__ = None with open(__lowerCAmelCase , """r""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = defaultdict(__lowerCAmelCase ) for line in correct_lines: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : str = argparse.ArgumentParser() parser.add_argument('--correct_filename', help='filename of tests with expected result') parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None) UpperCamelCase : str = parser.parse_args() main(args.correct_filename, args.fail_filename)
9
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def A__ ( __lowerCAmelCase : Union[str, Any] ): if hor == 128: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 64, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase__ = model.state_dict() lowerCamelCase__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) lowerCamelCase__ = model lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , 
"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
9
1
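The test-fixer script above unpacks each line of --correct_filename into four semicolon-separated fields. An illustrative entry (contents invented; the format follows the line.split(";") in the sample):

# file ; class name ; test name ; corrected source line
entry = "tests/test_foo.py;FooTester;test_bar;self.assertEqual(result, 4)"
file, class_name, test_name, correct_line = entry.split(";")
print("::".join([file, class_name, test_name]))  # key matched against failures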
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Optional[Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : str ): lowerCamelCase__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) lowerCamelCase__ = MaskFormerConfig(backbone_config=__lowerCAmelCase ) lowerCamelCase__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok lowerCamelCase__ = 847 lowerCamelCase__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok lowerCamelCase__ = 150 lowerCamelCase__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok lowerCamelCase__ = 171 lowerCamelCase__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO lowerCamelCase__ = 133 lowerCamelCase__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok lowerCamelCase__ = 19 lowerCamelCase__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok lowerCamelCase__ = 65 lowerCamelCase__ = """mapillary-vistas-id2label.json""" lowerCamelCase__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} return config def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', 
F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in 
range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : str ): lowerCamelCase__ = dct.pop(__lowerCAmelCase ) lowerCamelCase__ = val def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ): lowerCamelCase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCamelCase__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[:dim, :] lowerCamelCase__ = in_proj_bias[: dim] lowerCamelCase__ = in_proj_weight[ dim : dim * 2, : ] lowerCamelCase__ = in_proj_bias[ dim : dim * 2 ] lowerCamelCase__ = in_proj_weight[ -dim :, : ] lowerCamelCase__ = in_proj_bias[-dim :] # fmt: on def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ): # fmt: off lowerCamelCase__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) lowerCamelCase__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: hidden_size, :] lowerCamelCase__ = in_proj_bias[:config.hidden_size] lowerCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :] lowerCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2] lowerCamelCase__ = in_proj_weight[-hidden_size :, :] lowerCamelCase__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) lowerCamelCase__ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: hidden_size, :] lowerCamelCase__ = in_proj_bias[:config.hidden_size] lowerCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :] lowerCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2] lowerCamelCase__ = in_proj_weight[-hidden_size :, :] lowerCamelCase__ = in_proj_bias[-hidden_size :] # fmt: on def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , 
__lowerCAmelCase : str , __lowerCAmelCase : bool = False ): lowerCamelCase__ = get_maskformer_config(__lowerCAmelCase ) # load original state_dict with open(__lowerCAmelCase , """rb""" ) as f: lowerCamelCase__ = pickle.load(__lowerCAmelCase ) lowerCamelCase__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys lowerCamelCase__ = create_rename_keys(__lowerCAmelCase ) for src, dest in rename_keys: rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) read_in_swin_q_k_v(__lowerCAmelCase , config.backbone_config ) read_in_decoder_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # update to torch tensors for key, value in state_dict.items(): lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase ) # load 🤗 model lowerCamelCase__ = MaskFormerForInstanceSegmentation(__lowerCAmelCase ) model.eval() for name, param in model.named_parameters(): print(__lowerCAmelCase , param.shape ) lowerCamelCase__ , lowerCamelCase__ = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__lowerCAmelCase ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results lowerCamelCase__ = prepare_img() if "vistas" in model_name: lowerCamelCase__ = 65 elif "cityscapes" in model_name: lowerCamelCase__ = 6_5535 else: lowerCamelCase__ = 255 lowerCamelCase__ = True if """ade""" in model_name else False lowerCamelCase__ = MaskFormerImageProcessor(ignore_index=__lowerCAmelCase , reduce_labels=__lowerCAmelCase ) lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) lowerCamelCase__ = model(**__lowerCAmelCase ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": lowerCamelCase__ = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) model.save_pretrained(__lowerCAmelCase ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCamelCase : Optional[Any] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
9
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
1
'''simple docstring'''
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
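# Added usage sketch (not part of the original module). The checkpoint name and
# image path below are illustrative assumptions only.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")  # assumed checkpoint
image = Image.open("example.jpg")  # hypothetical local file
inputs = processor(images=image, text="What is shown in this picture?", return_tensors="pt")
# The returned BatchFeature combines pixel_values from the image processor,
# input_ids/attention_mask from the main tokenizer, and
# qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.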
9
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
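# Added sanity checks for the solver above; Project Euler 46's published answer is 5777.
assert is_prime(2) and is_prime(13) and not is_prime(9)
assert solution() == 5777  # smallest odd composite not expressible as prime + 2 * square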
9
1
'''simple docstring'''


def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        ("1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number)
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
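# Added examples for twos_complement above; values verified by hand.
assert twos_complement(-5) == "0b1011"  # -5 in the minimal 4-bit two's-complement encoding
assert twos_complement(-1) == "0b11"    # -1 needs two bits: the sign bit plus one value bit
assert twos_complement(0) == "0b0"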
9
'''simple docstring'''


def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
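# Added check: the only triplet with a + b + c == 1000 is (200, 375, 425),
# so solution() evaluates to the well-known Project Euler 9 answer below.
assert 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2
assert 200 * 375 * 425 == 31_875_000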
9
1
'''simple docstring'''
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    WavaVecaFeatureExtractor,
    WavaVecaPhonemeCTCTokenizer,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
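# Added illustration of calling the conversion entry point directly instead of via the CLI.
# Every path below is a placeholder, not a real checkpoint.
convert_unispeech_checkpoint(
    checkpoint_path="/path/to/unispeech_checkpoint.pt",  # fairseq checkpoint (placeholder)
    pytorch_dump_folder_path="/path/to/output_dir",      # must already exist when dict_path is set
    config_path=None,                                    # fall back to a default UniSpeechConfig
    dict_path="/path/to/dict.json",                      # fairseq Dictionary JSON (placeholder)
    is_finetuned=True,                                   # builds UniSpeechForCTC and writes a CTC vocab.json
)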
9
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
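# Added usage sketch via the released checkpoint named in the maps above; it assumes
# the installed `transformers` package exposes this class as CamembertTokenizer.
from transformers import CamembertTokenizer

tok = CamembertTokenizer.from_pretrained("camembert-base")
ids = tok("J'aime le camembert !")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # SentencePiece pieces wrapped in <s> ... </s>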
9
1
'''simple docstring'''


def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n / 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
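# Added cross-checks: the recursive function agrees with Python's built-in
# three-argument pow, and for prime p, b**(p-2) mod p is b's modular inverse
# (Fermat's little theorem).
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1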
9
'''simple docstring'''
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
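# Added illustration of the conv-kernel branch above: PyTorch stores convolution
# weights as (out, in, kh, kw); Flax expects (kh, kw, in, out), hence transpose(2, 3, 1, 0).
import numpy as np

pt_kernel = np.zeros((8, 3, 3, 3))             # OIHW layout, as in a PyTorch state dict
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # -> HWIO layout
assert flax_kernel.shape == (3, 3, 3, 8)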
9
1
'''simple docstring'''
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    '''simple docstring'''

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
9
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    '''simple docstring'''

    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
9
1
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeqaSeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    SeqaSeqDataCollator,
    SeqaSeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fpaa,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
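# Added smoke-test sketch: drive main() by faking the command line. Every model
# name, path, and flag value below is a placeholder assumption, and running this
# would actually launch a (tiny) training job.
import sys

sys.argv = [
    "finetune_trainer.py",
    "--model_name_or_path", "sshleifer/tiny-mbart",  # assumed tiny checkpoint
    "--data_dir", "/path/to/data",                   # expects train/val/test source+target files
    "--output_dir", "/tmp/seq2seq_out",
    "--task", "translation",
    "--src_lang", "en_XX",
    "--tgt_lang", "ro_RO",
    "--do_train",
    "--n_train", "8",
    "--num_train_epochs", "1",
]
main()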
9
'''simple docstring'''
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
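# Added example: 145 is a fixed point of the digit-factorial map (1! + 4! + 5! = 145),
# so its chain has length 1. Project Euler 74 (chains of length 60 below one million)
# has the published answer 402, which solution() reproduces.
assert digit_factorial_sum(145) == 1 + 24 + 120 == 145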
9
1
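A digit factorial chain repeatedly replaces a number by the sum of the factorials of its digits. A self-contained sanity check, using a minimal copy of the digit-factorial step, of the fixed point 145 and the classic five-term chain starting at 69:

from math import factorial


def digit_factorial_sum(n: int) -> int:  # same computation as above, minimal form
    return sum(factorial(int(d)) for d in str(n))


assert digit_factorial_sum(145) == 145  # 1! + 4! + 5! = 1 + 24 + 120 = 145

# 69 -> 363600 -> 1454 -> 169 -> 363601 -> (1454 repeats): 5 distinct terms.
seen: list[int] = []
element = 69
while element not in seen:
    seen.append(element)
    element = digit_factorial_sum(element)
assert len(seen) == 5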
'''simple docstring'''
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
9
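A short usage sketch, assuming `solve_maze` from the listing above is in scope; 0 marks an open cell and 1 marks a wall, and the solver searches from the top-left to the bottom-right corner:

maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
solve_maze(maze)  # prints a 0/1 grid marking the path, or "No solution exists!"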
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
9
1
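For orientation, this is roughly what the `DUMMY_CLASS` template above renders for a torch-only object; the template copy below is abbreviated and the class name is hypothetical:

DUMMY_CLASS = (
    "\nclass {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n"
)  # abbreviated copy of the template above

# Renders a stub class whose every entry point raises a "backend missing" error.
print(DUMMY_CLASS.format("UNet2DModel", ["torch"]))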
'''simple docstring'''


def hexagonal_numbers(length: int) -> list[int]:
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
9
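Hexagonal numbers satisfy h_n = n(2n - 1); note the comprehension starts at n = 0, so the first entry is 0 rather than 1. A self-contained check of what `hexagonal_numbers(length=5)` returns:

assert [n * (2 * n - 1) for n in range(5)] == [0, 1, 6, 15, 28]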
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1
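The `<0x..>` tokens in the expected outputs above come from SentencePiece's byte fallback: characters missing from the 2 000-token vocabulary are emitted one token per UTF-8 byte instead of `<unk>`. A self-contained check of the two fallbacks the test exercises:

# "é" is out of vocabulary, so it becomes the two byte tokens <0xC3>, <0xA9>:
assert "é".encode("utf-8") == b"\xc3\xa9"
# "9" in "92000" likewise falls back to its single byte, token <0x39>:
assert "9".encode("utf-8") == b"\x39"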
'''simple docstring'''
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
9
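A hedged usage sketch — it needs a real personal access token in the environment and assumes `fetch_github_info` from the listing above is in scope:

import os

token = os.environ.get("USER_TOKEN", "")
if token:  # silently skipped when no token is configured
    user = fetch_github_info(token)
    print(user.get("login"), user.get("id"))  # two fields of the JSON payload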
'''simple docstring'''
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
9
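The scan only works on input sorted in ascending order; on unsorted input it can walk past a valid pair. Assuming `two_pointer` from the listing above is in scope:

assert two_pointer([2, 7, 11, 15], 9) == [0, 1]    # 2 + 7 == 9
assert two_pointer([2, 7, 11, 15], 26) == [2, 3]   # 11 + 15 == 26
assert two_pointer([2, 7, 11, 15], 100) == []      # no pair reaches the target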
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[57] = True  # 58 leads to the cycle containing 89
CHAINS[0] = False  # 1 stays at 1


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(True)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
9
1
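Every "sum of squared digits" chain ends either at the fixed point 1 or in the cycle through 89, which is why caching just those two outcomes suffices. A self-contained check with a minimal digit-by-digit version of the chain step:

def squared_digit_sum(n: int) -> int:  # reference version, one digit at a time
    return sum(int(d) ** 2 for d in str(n))


assert squared_digit_sum(44) == 32   # 44 -> 32 -> 13 -> 10 -> 1
assert squared_digit_sum(85) == 89   # 85 -> 89 -> 145 -> ... -> 58 -> 89 (cycle)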
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : Optional[int] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : str ): lowerCamelCase__ = DPTConfig() if "large" in checkpoint_url: lowerCamelCase__ = 1024 lowerCamelCase__ = 4096 lowerCamelCase__ = 24 lowerCamelCase__ = 16 lowerCamelCase__ = [5, 11, 17, 23] lowerCamelCase__ = [256, 512, 1024, 1024] lowerCamelCase__ = (1, 384, 384) if "ade" in checkpoint_url: lowerCamelCase__ = True lowerCamelCase__ = 150 lowerCamelCase__ = """huggingface/label-files""" lowerCamelCase__ = """ade20k-id2label.json""" lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ = idalabel lowerCamelCase__ = {v: k for k, v in idalabel.items()} lowerCamelCase__ = [1, 150, 480, 480] return config, expected_shape def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Dict ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase__ = name.replace("""patch_embed""" , """patch_embeddings""" ) if "pos_embed" in name: lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase__ = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase__ = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name: lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase__ = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" ) if "layer4_rn" in name: lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowerCamelCase__ 
= name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase__ = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" ) return name def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ = in_proj_weight[: config.hidden_size, :] lowerCamelCase__ = in_proj_bias[: config.hidden_size] lowerCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase__ = in_proj_weight[ 
-config.hidden_size :, : ] lowerCamelCase__ = in_proj_bias[-config.hidden_size :] def A__ ( ): lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase ) # load original state_dict from URL lowerCamelCase__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(__lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) lowerCamelCase__ = val # read in qkv matrices read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase ) # load HuggingFace model lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() # Check outputs on an image lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ) # forward pass lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth # Assert logits lowerCamelCase__ = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: lowerCamelCase__ = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(__lowerCAmelCase ) assert ( torch.allclose(outputs[0, 0, :3, :3] , __lowerCAmelCase , atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , __lowerCAmelCase ) ) Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: print("""Pushing model to hub...""" ) model.push_to_hub( repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=__lowerCAmelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=__lowerCAmelCase , ) if __name__ == "__main__": UpperCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) UpperCamelCase : List[str] = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
9
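The `read_in_q_k_v` step above carves timm's fused qkv projection into the separate query/key/value weights the HF model expects. A self-contained sketch of that slicing on a toy tensor (the hidden size here is hypothetical; DPT-large uses 1024):

import torch

hidden_size = 4  # toy value for illustration
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

query = in_proj_weight[:hidden_size, :]                 # first h rows
key = in_proj_weight[hidden_size : hidden_size * 2, :]  # middle h rows
value = in_proj_weight[-hidden_size:, :]                # last h rows

# Concatenating the three slices recovers the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)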
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
9
1
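A short usage sketch of the config class (assumes transformers is installed); the derived attributes follow directly from the constructor logic above:

from transformers import DonutSwinConfig

config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
assert config.num_layers == 4              # len(depths)
assert config.hidden_size == 96 * 2 ** 3   # channel dim after the last stage: 768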
'''simple docstring'''


def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
9
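All three variants return the same value; a quick self-contained check on the first benchmark input, 2^18 = 262144:

def digit_sum(n: int) -> int:  # the compact variant from above, inlined
    return sum(int(c) for c in str(abs(n)))


assert digit_sum(262144) == 19    # 2 + 6 + 2 + 1 + 4 + 4
assert digit_sum(-262144) == 19   # abs() discards the sign first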
'''simple docstring'''
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
9
1
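The conversion is nothing more than renaming one key in the pickled state dict; a toy dict stands in for the real tensors:

d = {"lm_head.decoder.weight": 1.0, "transformer.wte.weight": 2.0}
d["lm_head.weight"] = d.pop("lm_head.decoder.weight")  # rename, value unchanged
assert "lm_head.decoder.weight" not in d and d["lm_head.weight"] == 1.0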
'''simple docstring''' import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def UpperCamelCase_ ( self ,_lowerCAmelCase=0 ): lowerCamelCase__ = np.random.RandomState(_lowerCAmelCase ) lowerCamelCase__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCamelCase__ = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) lowerCamelCase__ = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCamelCase__ = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) lowerCamelCase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCamelCase__ = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) lowerCamelCase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCamelCase__ = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): 
lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) lowerCamelCase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCamelCase__ = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = pipe(**_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCamelCase__ = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = 3 * [inputs["""prompt"""]] # forward lowerCamelCase__ = pipe(**_lowerCAmelCase ) lowerCamelCase__ = output.images[0, -3:, -3:, -1] lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = 3 * [inputs.pop("""prompt""" )] lowerCamelCase__ = pipe.tokenizer( _lowerCAmelCase ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""np""" ,) lowerCamelCase__ = text_inputs["""input_ids"""] lowerCamelCase__ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] lowerCamelCase__ = prompt_embeds # forward lowerCamelCase__ = pipe(**_lowerCAmelCase ) lowerCamelCase__ = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = 3 * ["""this is a negative prompt"""] lowerCamelCase__ = negative_prompt lowerCamelCase__ = 3 * [inputs["""prompt"""]] # forward lowerCamelCase__ = pipe(**_lowerCAmelCase ) lowerCamelCase__ = output.images[0, -3:, -3:, -1] lowerCamelCase__ = self.get_dummy_inputs() lowerCamelCase__ = 3 * [inputs.pop("""prompt""" )] lowerCamelCase__ = [] for p in [prompt, negative_prompt]: lowerCamelCase__ = pipe.tokenizer( _lowerCAmelCase ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""np""" ,) lowerCamelCase__ = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) lowerCamelCase__ , lowerCamelCase__ = embeds # forward lowerCamelCase__ = pipe(**_lowerCAmelCase ) lowerCamelCase__ = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase__ 
(unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase_ ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase_ ( self ): lowerCamelCase__ = ort.SessionOptions() lowerCamelCase__ = False return options def UpperCamelCase_ ( self ): # using the PNDM scheduler by default lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" ,revision="""onnx""" ,safety_checker=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = """A painting of a squirrel eating a burger""" np.random.seed(0 ) lowerCamelCase__ = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type="""np""" ) lowerCamelCase__ = output.images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCamelCase__ = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = """open neural network exchange""" lowerCamelCase__ = np.random.RandomState(0 ) lowerCamelCase__ = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_lowerCAmelCase ,output_type="""np""" ) lowerCamelCase__ = output.images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCamelCase__ = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=_lowerCAmelCase ,safety_checker=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = """open neural network exchange""" lowerCamelCase__ = np.random.RandomState(0 ) lowerCamelCase__ = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_lowerCAmelCase ,output_type="""np""" ) lowerCamelCase__ = output.images lowerCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCamelCase__ = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = 0 def test_callback_fn(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> None: lowerCamelCase__ = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) lowerCamelCase__ = latents[0, -3:, -3:, -1] lowerCamelCase__ = 
np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) lowerCamelCase__ = latents[0, -3:, -3:, -1] lowerCamelCase__ = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 lowerCamelCase__ = False lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = """Andromeda galaxy in a bottle""" lowerCamelCase__ = np.random.RandomState(0 ) pipe( prompt=_lowerCAmelCase ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=_lowerCAmelCase ,callback=_lowerCAmelCase ,callback_steps=1 ,) assert test_callback_fn.has_been_called assert number_of_steps == 6 def UpperCamelCase_ ( self ): lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) assert isinstance(_lowerCAmelCase ,_lowerCAmelCase ) assert pipe.safety_checker is None lowerCamelCase__ = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCAmelCase ) lowerCamelCase__ = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCamelCase__ = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None
9
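Nearly every check in the test file above follows the same recipe: run the pipeline, take the 3x3 corner slice of the last channel of the first image, and compare it against hard-coded reference values within a tolerance. A self-contained sketch of that comparison on placeholder data (the zeros are hypothetical stand-ins):

import numpy as np

image = np.zeros((1, 128, 128, 3))      # stand-in for pipe(...).images
image_slice = image[0, -3:, -3:, -1]    # 3x3 corner of the last channel
expected_slice = np.zeros(9)            # stand-in for the hard-coded reference
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2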
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 ,_lowerCAmelCase=3 ,_lowerCAmelCase=10 ,_lowerCAmelCase=[10, 20, 30, 40] ,_lowerCAmelCase=[1, 1, 2, 1] ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase="relu" ,_lowerCAmelCase=3 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = num_channels lowerCamelCase__ = embeddings_size lowerCamelCase__ = hidden_sizes lowerCamelCase__ = depths lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_act lowerCamelCase__ = num_labels lowerCamelCase__ = scope lowerCamelCase__ = len(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_labels ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFRegNetModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = TFRegNetForImageClassification(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () _UpperCamelCase = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = 
TFRegNetModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ) def UpperCamelCase_ ( self ): return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,reason="""TF does not support backprop for grouped convolutions on CPU.""" ,) @slow def UpperCamelCase_ ( self ): super().test_keras_fit() @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): def check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = model(**self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ,training=_lowerCAmelCase ) lowerCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ = self.model_tester.num_stages self.assertEqual(len(_lowerCAmelCase ) ,expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase__ = layer_type lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ = True check_hidden_states_output(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase={} ): lowerCamelCase__ = model(_lowerCAmelCase ,return_dict=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,return_dict=_lowerCAmelCase ,**_lowerCAmelCase ).to_tuple() def recursive_check(_lowerCAmelCase ,_lowerCAmelCase ): if isinstance(_lowerCAmelCase ,(List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase ,_lowerCAmelCase ): recursive_check(_lowerCAmelCase ,_lowerCAmelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(_lowerCAmelCase ,_lowerCAmelCase ) ) ,msg=( """Tuple and dict output are not equal. 
Difference:""" F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) ,) recursive_check(_lowerCAmelCase ,_lowerCAmelCase ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,{"""output_hidden_states""": True} ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase ) check_equivalence(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,{"""output_hidden_states""": True} ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFRegNetModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,training=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCamelCase : List[Any] = logging.get_logger(__name__) @dataclass class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = [ 'no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process', ] def __init__( self ,**_lowerCAmelCase ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowerCamelCase__ = deprecated_arg[3:] lowerCamelCase__ = not kwargs.pop(_lowerCAmelCase ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) lowerCamelCase__ = kwargs.pop("""tpu_name""" ,self.tpu_name ) lowerCamelCase__ = kwargs.pop("""device_idx""" ,self.device_idx ) lowerCamelCase__ = kwargs.pop("""eager_mode""" ,self.eager_mode ) lowerCamelCase__ = kwargs.pop("""use_xla""" ,self.use_xla ) super().__init__(**_lowerCAmelCase ) _UpperCamelCase = field( default=a ,metadata={'help': 'Name of TPU'} ,) _UpperCamelCase = field( default=0 ,metadata={'help': 'CPU / GPU device index. Defaults to 0.'} ,) _UpperCamelCase = field(default=a ,metadata={'help': 'Benchmark models in eager model.'} ) _UpperCamelCase = field( default=a ,metadata={ 'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.' } ,) @cached_property def UpperCamelCase_ ( self ): requires_backends(self ,["""tf"""] ) lowerCamelCase__ = None if self.tpu: try: if self.tpu_name: lowerCamelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowerCamelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowerCamelCase__ = None return tpu @cached_property def UpperCamelCase_ ( self ): requires_backends(self ,["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowerCamelCase__ = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,"""GPU""" ) lowerCamelCase__ = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] ,"""GPU""" ) # disable GPU lowerCamelCase__ = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def UpperCamelCase_ ( self ): requires_backends(self ,["""tf"""] ) return self._setup_tpu is not None @property def UpperCamelCase_ ( self ): requires_backends(self ,["""tf"""] ) return self._setup_strategy @property def UpperCamelCase_ ( self ): requires_backends(self ,["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def UpperCamelCase_ ( self ): requires_backends(self ,["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCamelCase_ ( self ): return self.n_gpu > 0
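# Added note (not part of the original file): _setup_strategy resolves the execution target
# in priority order: TPUStrategy when a TPU cluster resolver is found, otherwise a
# OneDeviceStrategy pinned to /gpu:{device_idx} with all other GPUs hidden, and finally
# /cpu:{device_idx} with GPUs disabled entirely.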
9
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
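# Added commentary (not from the original script): each pass of run_gradient_descent applies
# the batch update rule theta_i <- theta_i - LEARNING_RATE * dJ/dtheta_i, where the partial
# derivative comes from get_cost_derivative(i - 1); index -1 selects the bias term, whose
# summation omits the train_data[i][0][index] feature factor.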
9
1
'''simple docstring''' def A__ ( __lowerCAmelCase : int ): return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
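# Worked example (added): parity is decided by the lowest bit, so 10 = 0b1010 gives
# 10 & 1 == 0 (even) while 7 = 0b0111 gives 7 & 1 == 1 (odd).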
9
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
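# Hypothetical invocation (added; the script and file names below are illustrative, not
# taken from the source, though the three flags match the argparse definition above):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm-pipeline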
9
1
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
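# Added note (not part of the original test file): the fixed np.random.seed(2) noise tensor
# is threaded through every forward call because ViTMAE otherwise samples a fresh random
# mask per pass (see the skip reasons above), which would make the save/load, PT-TF, and
# keras-save comparisons nondeterministic.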
9
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
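# Added note: the brute force works because XOR is self-inverse, (plain ^ key) ^ key == plain,
# so re-applying each candidate three-letter lowercase key either recovers text made of
# VALID_CHARS (kept as a possible) or produces an out-of-range byte (rejected by try_key).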
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCamelCase : Union[str, Any] = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = ['ConvNextFeatureExtractor'] UpperCamelCase : Any = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys UpperCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
9
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
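# Reference vector (added, for the hashlib comparison used in the unit test above):
# hashlib.sha256(b"abc").hexdigest() ==
# "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"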
9
1
'''simple docstring''' def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) lowerCamelCase__ = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
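# Worked example (added): for 11 = 0b1011 the loop body runs exactly three times,
# 0b1011 & 0b1010 = 0b1010, then 0b1010 & 0b1001 = 0b1000, then 0b1000 & 0b0111 = 0,
# since number &= number - 1 clears the lowest set bit on each pass.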
9
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
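# Hypothetical invocation (added; the script name is illustrative): both arguments are
# positional per the argparse definition above, so e.g.
#   python convert_m2m100_checkpoint.py model.pt ./m2m100-converted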
9
1
'''simple docstring''' def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ): lowerCamelCase__ = 0 lowerCamelCase__ = len(__lowerCAmelCase ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowerCamelCase__ = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__lowerCAmelCase ): return None lowerCamelCase__ = sorted_collection[point] if current_item == item: return point else: if point < left: lowerCamelCase__ = left lowerCamelCase__ = point elif point > right: lowerCamelCase__ = right lowerCamelCase__ = point else: if item < current_item: lowerCamelCase__ = point - 1 else: lowerCamelCase__ = point + 1 return None def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : str ): # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowerCamelCase__ = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__lowerCAmelCase ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) elif point > right: return interpolation_search_by_recursion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , point - 1 ) else: return interpolation_search_by_recursion( __lowerCAmelCase , __lowerCAmelCase , point + 1 , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : str ): if collection != sorted(__lowerCAmelCase ): raise ValueError("""Collection must be ascending sorted""" ) return True if __name__ == "__main__": import sys UpperCamelCase : List[str] = 0 if debug == 1: UpperCamelCase : Dict = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit('Sequence must be ascending sorted to apply interpolation search') UpperCamelCase : Optional[int] = 67 UpperCamelCase : str = interpolation_search(collection, target) if result is not None: print(F'{target} found at positions: {result}') else: print('Not found')
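# Added note: both variants probe at
#   point = left + ((item - a[left]) * (right - left)) // (a[right] - a[left]),
# a linear interpolation of the target's likely position; on ascending, roughly uniformly
# distributed keys this averages O(log log n) probes versus binary search's O(log n).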
9
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
9
1
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = 42 class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = [[] for _ in range(_lowerCAmelCase )] lowerCamelCase__ = size def __getitem__( self ,_lowerCAmelCase ): return iter(self._graph[vertex] ) @property def UpperCamelCase_ ( self ): return self._size def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(_lowerCAmelCase ,_lowerCAmelCase ) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = deque([start_vertex] ) lowerCamelCase__ = [None] * self.size lowerCamelCase__ = 0 while queue: lowerCamelCase__ = queue.popleft() lowerCamelCase__ = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowerCamelCase__ = current_distance + edge.weight lowerCamelCase__ = distances[edge.destination_vertex] if ( isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and new_distance >= dest_vertex_distance ): continue lowerCamelCase__ = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def A__ ( __lowerCAmelCase : Union[str, Any] ): if hor == 128: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 64, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase__ = model.state_dict() lowerCamelCase__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) lowerCamelCase__ = model lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , 
"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
9
1
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax UpperCamelCase : Dict = logging.get_logger(__name__) @add_end_docstrings(a ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,**_lowerCAmelCase ): super().__init__(**_lowerCAmelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self ,_lowerCAmelCase ,**_lowerCAmelCase ): return super().__call__(_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = {} if "candidate_labels" in kwargs: lowerCamelCase__ = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: lowerCamelCase__ = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase="This is a photo of {}." ): lowerCamelCase__ = load_image(_lowerCAmelCase ) lowerCamelCase__ = self.image_processor(images=[image] ,return_tensors=self.framework ) lowerCamelCase__ = candidate_labels lowerCamelCase__ = [hypothesis_template.format(_lowerCAmelCase ) for x in candidate_labels] lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ,return_tensors=self.framework ,padding=_lowerCAmelCase ) lowerCamelCase__ = [text_inputs] return inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = model_inputs.pop("""candidate_labels""" ) lowerCamelCase__ = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,_lowerCAmelCase ): lowerCamelCase__ = text_inputs[0] else: # Batching case. lowerCamelCase__ = text_inputs[0][0] lowerCamelCase__ = self.model(**_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = model_outputs.pop("""candidate_labels""" ) lowerCamelCase__ = model_outputs["""logits"""][0] if self.framework == "pt": lowerCamelCase__ = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCamelCase__ = probs.tolist() if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [scores] elif self.framework == "tf": lowerCamelCase__ = stable_softmax(_lowerCAmelCase ,axis=-1 ) lowerCamelCase__ = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) lowerCamelCase__ = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(_lowerCAmelCase ,_lowerCAmelCase ) ,key=lambda _lowerCAmelCase : -x[0] ) ] return result
9
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
1
'''simple docstring'''
from __future__ import annotations

from typing import Any


def A__(postfix_notation: list):
    if not postfix_notation:
        return 0
    operations = {"""+""", """-""", """*""", """/"""}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            # First pop is the right-hand operand, second is the left-hand one.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncating toward zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
9
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("""n must be an integer""")
    if n <= 0:
        raise ValueError("""n must be >= 0""")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
1
'''simple docstring'''
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordlist_path = os.path.join(script_dir, """words.txt""")
    words = """"""
    with open(wordlist_path) as f:
        words = f.readline()
    words = [word.strip("""\"""") for word in words.strip("""\r\n""").split(""",""")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
9
'''simple docstring'''


def solution():
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
1
'''simple docstring''' import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase__ : '''simple docstring''' @property def UpperCamelCase_ ( self ): return self.get_dummy_input() @property def UpperCamelCase_ ( self ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def UpperCamelCase_ ( self ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,): lowerCamelCase__ = 4 lowerCamelCase__ = 32 lowerCamelCase__ = (32, 32) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = torch.device(_lowerCAmelCase ) lowerCamelCase__ = (batch_size, num_channels) + sizes lowerCamelCase__ = randn_tensor(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ) lowerCamelCase__ = {"""hidden_states""": hidden_states} if include_temb: lowerCamelCase__ = 1_28 lowerCamelCase__ = randn_tensor((batch_size, temb_channels) ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ) if include_res_hidden_states_tuple: lowerCamelCase__ = torch.manual_seed(1 ) lowerCamelCase__ = (randn_tensor(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ),) if include_encoder_hidden_states: lowerCamelCase__ = floats_tensor((batch_size, 32, 32) ).to(_lowerCAmelCase ) if include_skip_sample: lowerCamelCase__ = randn_tensor(((batch_size, 3) + sizes) ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ) return dummy_input def UpperCamelCase_ ( self ): lowerCamelCase__ = { """in_channels""": 32, """out_channels""": 32, """temb_channels""": 1_28, } if self.block_type == "up": lowerCamelCase__ = 32 if self.block_type == "mid": init_dict.pop("""out_channels""" ) lowerCamelCase__ = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ = self.prepare_init_args_and_inputs_for_common() lowerCamelCase__ = self.block_class(**_lowerCAmelCase ) unet_block.to(_lowerCAmelCase ) unet_block.eval() with torch.no_grad(): lowerCamelCase__ = unet_block(**_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = output[0] self.assertEqual(output.shape ,self.output_shape ) lowerCamelCase__ = output[0, -1, -3:, -3:] lowerCamelCase__ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase ) assert torch_all_close(output_slice.flatten() ,_lowerCAmelCase ,atol=5E-3 ) @unittest.skipIf(torch_device == """mps""" ,"""Training is not supported in mps""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.prepare_init_args_and_inputs_for_common() lowerCamelCase__ = self.block_class(**_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.train() lowerCamelCase__ = model(**_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = output[0] lowerCamelCase__ = torch.device(_lowerCAmelCase ) lowerCamelCase__ = randn_tensor(output.shape ,device=_lowerCAmelCase ) lowerCamelCase__ = torch.nn.functional.mse_loss(_lowerCAmelCase ,_lowerCAmelCase ) loss.backward()
9
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } UpperCamelCase : List[Any] = { 'camembert-base': 5_12, } UpperCamelCase : List[str] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 
{self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
9
1
'''simple docstring''' from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
9
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = R"""\w+[.]\d+""" lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase ) for pat in pats: lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) ) return key def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ): # Step 1: Convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = rename_key(__lowerCAmelCase ) lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase )
9
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase : List[Any] = 25_60_47 UpperCamelCase : Dict = 25_61_45 @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = NllbTokenizer _UpperCamelCase = NllbTokenizerFast _UpperCamelCase = True _UpperCamelCase = True _UpperCamelCase = {} def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = NllbTokenizer(_lowerCAmelCase ,keep_accents=_lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = NllbTokenizer(_lowerCAmelCase ,keep_accents=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCAmelCase ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def UpperCamelCase_ ( self ): lowerCamelCase__ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = self.tokenizer_class.from_pretrained(_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = tokenizer_r.save_pretrained(_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.save_pretrained(_lowerCAmelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files 
) ) lowerCamelCase__ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(_lowerCAmelCase ,_lowerCAmelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ = tokenizer_r.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.from_pretrained(_lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase ,_lowerCAmelCase ) ) shutil.rmtree(_lowerCAmelCase ) # Save tokenizer rust, legacy_format=True lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = tokenizer_r.save_pretrained(_lowerCAmelCase ,legacy_format=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.save_pretrained(_lowerCAmelCase ) # Checks it save with the same files self.assertSequenceEqual(_lowerCAmelCase ,_lowerCAmelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ = tokenizer_r.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.from_pretrained(_lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase ,_lowerCAmelCase ) ) shutil.rmtree(_lowerCAmelCase ) # Save tokenizer rust, legacy_format=False lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = tokenizer_r.save_pretrained(_lowerCAmelCase ,legacy_format=_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.save_pretrained(_lowerCAmelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase__ = tokenizer_r.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.from_pretrained(_lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCAmelCase ,_lowerCAmelCase ) ) shutil.rmtree(_lowerCAmelCase ) @require_torch def UpperCamelCase_ ( self ): if not self.test_seqaseq: return lowerCamelCase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
lowerCamelCase__ = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] lowerCamelCase__ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: lowerCamelCase__ = tokenizer.prepare_seqaseq_batch( src_texts=_lowerCAmelCase ,tgt_texts=_lowerCAmelCase ,max_length=3 ,max_target_length=10 ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" ,) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,10 ) # max_target_length will default to max_length if not specified lowerCamelCase__ = tokenizer.prepare_seqaseq_batch( _lowerCAmelCase ,tgt_texts=_lowerCAmelCase ,max_length=3 ,return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,3 ) lowerCamelCase__ = tokenizer.prepare_seqaseq_batch( src_texts=_lowerCAmelCase ,max_length=3 ,max_target_length=10 ,return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 ) self.assertNotIn("""decoder_input_ids""" ,_lowerCAmelCase ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase__ = [AddedToken("""<special>""" ,lstrip=_lowerCAmelCase )] lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = tokenizer_r.encode("""Hey this is a <special> token""" ) lowerCamelCase__ = tokenizer_r.encode("""<special>""" ,add_special_tokens=_lowerCAmelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained( _lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,**_lowerCAmelCase ,) lowerCamelCase__ = self.tokenizer_class.from_pretrained( _lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,**_lowerCAmelCase ) lowerCamelCase__ = tokenizer_p.encode("""Hey this is a <special> token""" ) lowerCamelCase__ = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' _UpperCamelCase = 'facebook/nllb-200-distilled-600M' _UpperCamelCase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and 
more weapons will only worsen the violence and misery for millions of people.', ] _UpperCamelCase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _UpperCamelCase = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def UpperCamelCase_ ( cls ): lowerCamelCase__ = NllbTokenizer.from_pretrained( cls.checkpoint_name ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" ) lowerCamelCase__ = 1 return cls def UpperCamelCase_ ( self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] ,25_60_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] ,25_60_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] ,25_60_57 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.assertIn(_lowerCAmelCase ,self.tokenizer.all_special_ids ) # fmt: off lowerCamelCase__ = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47] # fmt: on lowerCamelCase__ = self.tokenizer.decode(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] ,_lowerCAmelCase ) lowerCamelCase__ = 10 lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ,max_length=_lowerCAmelCase ,truncation=_lowerCAmelCase ).input_ids[0] self.assertEqual(ids[-1] ,2 ) self.assertEqual(ids[0] ,_lowerCAmelCase ) self.assertEqual(len(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[25_62_03, 3] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_lowerCAmelCase ) lowerCamelCase__ = NllbTokenizer.from_pretrained(_lowerCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_lowerCAmelCase ) @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=_lowerCAmelCase ,truncation=_lowerCAmelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,) lowerCamelCase__ = shift_tokens_right( batch["""labels"""] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(_lowerCAmelCase ,_lowerCAmelCase ) self.assertEqual((2, 15) ,batch.input_ids.shape ) self.assertEqual((2, 15) ,batch.attention_mask.shape ) lowerCamelCase__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase ,batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens 
,[self.tokenizer.eos_token_id] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer(self.src_text ,padding=_lowerCAmelCase ,truncation=_lowerCAmelCase ,max_length=3 ,return_tensors="""pt""" ) lowerCamelCase__ = self.tokenizer( text_target=self.tgt_text ,padding=_lowerCAmelCase ,truncation=_lowerCAmelCase ,max_length=10 ,return_tensors="""pt""" ) lowerCamelCase__ = targets["""input_ids"""] lowerCamelCase__ = shift_tokens_right( _lowerCAmelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer._build_translation_inputs( """A test""" ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(_lowerCAmelCase ) ,{ # A, test, EOS, en_XX """input_ids""": [[25_60_47, 70, 73_56, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_60_57, } ,) @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = True lowerCamelCase__ = self.tokenizer( """UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids ,[1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] ) lowerCamelCase__ = False lowerCamelCase__ = self.tokenizer( """UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids ,[25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
9
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase : int = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
'''simple docstring'''
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""")
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 100_0000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0"""
        )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution()}')
9
1
'''simple docstring'''
import csv

import tweepy

# Twitter API credentials
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(F'''getting tweets before {oldest}''')
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest
        )
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F'''...{len(alltweets)} tweets downloaded so far''')
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F'''new_{screen_name}_tweets.csv''', """w""") as f:
        writer = csv.writer(f)
        writer.writerow(["""id""", """created_at""", """text"""])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets('FirePing32')
9
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
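For orientation, this is what the three templates referenced by create_dummy_object render to. A minimal sketch; FOO, foo_function and FooPipeline are made-up names, and the template constants are assumed to be bound as DUMMY_CONSTANT, DUMMY_FUNCTION and DUMMY_CLASS, which is how the function body refers to them:

print(DUMMY_CONSTANT.format("FOO"))
# FOO = None

print(DUMMY_FUNCTION.format("foo_function", '["torch"]'))
# def foo_function(*args, **kwargs):
#     requires_backends(foo_function, ["torch"])

print(DUMMY_CLASS.format("FooPipeline", '["torch"]'))
# class FooPipeline(metaclass=DummyObject):
#     _backends = ["torch"]
#     (plus __init__ / from_config / from_pretrained stubs that call requires_backends)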
9
1
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
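A minimal usage sketch for the configuration above, assuming it is used inside a transformers checkout where the relative imports resolve, or via the released transformers.Mask2FormerConfig, which this file matches:

from transformers import Mask2FormerConfig

config = Mask2FormerConfig()  # falls back to the default Swin backbone
print(config.backbone_config.model_type)      # swin
print(config.hidden_dim, config.num_queries)  # 256 100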
9
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1
'''simple docstring''' import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy UpperCamelCase : Optional[Any] = logging.getLogger(__name__) UpperCamelCase : Union[str, Any] = 'pytorch_model.bin' @dataclasses.dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = dataclasses.field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} ) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} ,) @dataclasses.dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} ) _UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} ) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'A csv or a json file containing the validation data.'} ) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'The name of the task to train on.'} ,) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'The list of labels for the task.'} ) @dataclasses.dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = dataclasses.field( metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} ) _UpperCamelCase = dataclasses.field( default='accuracy' ,metadata={'help': 'The evaluation metric used for the task.'} ) _UpperCamelCase = dataclasses.field( default='no' ,metadata={ 'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' } ,) _UpperCamelCase = dataclasses.field( default=10 ,metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} ,) _UpperCamelCase = dataclasses.field( default=0.0 ,metadata={ 'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.' 
} ,) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} ,) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} ,) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} ,) _UpperCamelCase = dataclasses.field( default=0.0 ,metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} ,) _UpperCamelCase = dataclasses.field( default=100 ,metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} ,) _UpperCamelCase = dataclasses.field( default=a ,metadata={'help': 'Random seed for initialization.'} ,) def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ): lowerCamelCase__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: lowerCamelCase__ = dataset.filter(lambda __lowerCAmelCase : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 lowerCamelCase__ = int(eval_result * len(__lowerCAmelCase ) ) print(__lowerCAmelCase ) lowerCamelCase__ = dataset.sort("""probability""" , reverse=__lowerCAmelCase ) lowerCamelCase__ = dataset.select(range(__lowerCAmelCase ) ) lowerCamelCase__ = dataset.remove_columns(["""label""", """probability"""] ) lowerCamelCase__ = dataset.rename_column("""prediction""" , """label""" ) lowerCamelCase__ = dataset.map(lambda __lowerCAmelCase : {"label": idalabel[example["label"]]} ) lowerCamelCase__ = dataset.shuffle(seed=args.seed ) lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''train_pseudo.{args.data_file_extension}''' ) if args.data_file_extension == "csv": dataset.to_csv(__lowerCAmelCase , index=__lowerCAmelCase ) else: dataset.to_json(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : str ): lowerCamelCase__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() lowerCamelCase__ = STModelArguments(model_name_or_path=__lowerCAmelCase ) lowerCamelCase__ = STDataArguments(train_file=__lowerCAmelCase , infer_file=__lowerCAmelCase ) lowerCamelCase__ = STTrainingArguments(output_dir=__lowerCAmelCase ) lowerCamelCase__ = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(__lowerCAmelCase ).items(): setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for key, value in kwargs.items(): if hasattr(__lowerCAmelCase , __lowerCAmelCase ): setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Sanity checks lowerCamelCase__ = {} lowerCamelCase__ = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None lowerCamelCase__ = args.train_file lowerCamelCase__ = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None lowerCamelCase__ = args.eval_file for key in data_files: lowerCamelCase__ = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.''' if args.data_file_extension is None: lowerCamelCase__ = extension else: assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.''' assert ( args.eval_metric in datasets.list_metrics() ), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.''' # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) lowerCamelCase__ = F'''{args.output_dir}/self-train_iter-{{}}'''.format lowerCamelCase__ = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) accelerator.wait_for_everyone() lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = 0 lowerCamelCase__ = False # Show the progress bar lowerCamelCase__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): lowerCamelCase__ = data_dir_format(__lowerCAmelCase ) assert os.path.exists(__lowerCAmelCase ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 lowerCamelCase__ = os.path.join(__lowerCAmelCase , """stage-1""" ) lowerCamelCase__ = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(__lowerCAmelCase , __lowerCAmelCase ): arguments_dict.update({key: value} ) lowerCamelCase__ = os.path.join(__lowerCAmelCase , """best-checkpoint""" , __lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , __lowerCAmelCase , __lowerCAmelCase , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , __lowerCAmelCase ) finetune(**__lowerCAmelCase ) accelerator.wait_for_everyone() assert os.path.exists(__lowerCAmelCase ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , __lowerCAmelCase ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data lowerCamelCase__ = os.path.join(__lowerCAmelCase , """best-checkpoint""" ) lowerCamelCase__ = os.path.join(__lowerCAmelCase , """stage-2""" ) # Update arguments_dict lowerCamelCase__ = model_path lowerCamelCase__ = data_files["""train"""] lowerCamelCase__ = current_output_dir lowerCamelCase__ = os.path.join(__lowerCAmelCase , """best-checkpoint""" , __lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ): logger.info( """Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.""" , __lowerCAmelCase , __lowerCAmelCase , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , __lowerCAmelCase ) finetune(**__lowerCAmelCase ) accelerator.wait_for_everyone() assert os.path.exists(__lowerCAmelCase ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , __lowerCAmelCase ) lowerCamelCase__ = iteration lowerCamelCase__ = data_dir_format(iteration + 1 ) lowerCamelCase__ = AutoConfig.from_pretrained(os.path.join(__lowerCAmelCase , """best-checkpoint""" ) ) lowerCamelCase__ = config.idalabel lowerCamelCase__ = os.path.join(__lowerCAmelCase , """eval_results_best-checkpoint.json""" ) lowerCamelCase__ = os.path.join(__lowerCAmelCase , """test_results_best-checkpoint.json""" ) assert os.path.exists(__lowerCAmelCase ) with open(__lowerCAmelCase , """r""" ) as f: lowerCamelCase__ = float(json.load(__lowerCAmelCase )[args.eval_metric] ) lowerCamelCase__ = os.path.join(__lowerCAmelCase , """infer_output_best-checkpoint.csv""" ) assert os.path.exists(__lowerCAmelCase ) # Loading the dataset from local csv or json files. lowerCamelCase__ = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""] lowerCamelCase__ = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'''eval_results_iter-{iteration}.json''' ) ) if os.path.exists(__lowerCAmelCase ): shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'''test_results_iter-{iteration}.json''' ) ) create_pseudo_labeled_data(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) accelerator.wait_for_everyone() lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''train_pseudo.{args.data_file_extension}''' ) if args.evaluation_strategy != IntervalStrategy.NO.value: lowerCamelCase__ = eval_result if best_iteration is None: lowerCamelCase__ = new_iteration lowerCamelCase__ = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: lowerCamelCase__ = new_iteration lowerCamelCase__ = new_eval_result lowerCamelCase__ = 0 else: if new_eval_result == best_eval_result: lowerCamelCase__ = new_iteration lowerCamelCase__ = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: lowerCamelCase__ = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , __lowerCAmelCase ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __lowerCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__lowerCAmelCase , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(__lowerCAmelCase , """eval_results_best-iteration.json""" ) , ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __lowerCAmelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(__lowerCAmelCase , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__lowerCAmelCase , """eval_results_best-iteration.json""" ) , )
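The heart of create_pseudo_labeled_data is a confidence filter over model predictions. A self-contained sketch of the same idea with plain Python lists (the values are hypothetical; the real code applies the equivalent predicate through datasets.filter in the do_filter_by_confidence branch):

# Keep only predictions the model is confident about.
rows = [
    {"prediction": "pos", "probability": 0.97},
    {"prediction": "neg", "probability": 0.55},
    {"prediction": "pos", "probability": 0.81},
]
confidence_threshold = 0.8
pseudo_labeled = [r for r in rows if r["probability"] > confidence_threshold]
assert [r["prediction"] for r in pseudo_labeled] == ["pos", "pos"]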
9
9
1
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    # Solve impedance**2 = resistance**2 + reactance**2 for whichever of the
    # three quantities is passed as 0.
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
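A worked example of the relation the function solves, impedance**2 = resistance**2 + reactance**2, using the function above:

# For R = 3 and X = 4, Z = sqrt(9 + 16) = 5.
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
# Conversely, recover X from Z = 5 and R = 3: sqrt(25 - 9) = 4.
assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}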
9
from __future__ import annotations

# Pre-computed digit-square sums for every 5-digit block.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and the chain member 58 is the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True    # 1 is on the chain that ends with 1
CHAINS[57] = False  # 58 is on the chain that ends with 89


def chain(number: int) -> bool:
    # True if the chain starting at `number` ends with 1, False if it ends with 89.
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # The result also holds for number * 10, number * 100, ... since appending
    # zeros does not change the digit-square sum.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    # Count the starting numbers below `number` whose chain arrives at 89.
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
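A quick sanity check of both helpers; per the convention above, True marks chains that end with 1 and False those that end with 89:

assert next_number(44) == 4**2 + 4**2 == 32
assert chain(44) is True    # 44 -> 32 -> 13 -> 10 -> 1
assert chain(85) is False   # 85 -> 89 -> ...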
9
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
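The last line of __init__ is the only derived field: the channel width doubles after each of the len(depths) - 1 patch-merging stages. With the defaults above:

# embed_dim = 96 and four stages -> hidden_size = 96 * 2**3 = 768
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768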
9
9
1
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    # Greedy fractional knapsack: returns the maximum attainable value and,
    # for each item, the fraction of it that is taken.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # consider items in decreasing value-per-weight order
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
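A worked example: with values [60, 100, 120], weights [10, 20, 30] and capacity 50, the first two items fit whole and two thirds of the third item are taken, giving 60 + 100 + 80 = 240:

max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert max_value == 240.0
assert fractions == [1, 1, 20 / 30]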
9
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
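The conversion itself is a single key rename in the checkpoint's state dict. A standalone sketch of that step with dummy tensors:

import torch

state_dict = {"lm_head.decoder.weight": torch.zeros(2, 2), "transformer.wte.weight": torch.ones(2, 2)}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in state_dict and "lm_head.weight" in state_dict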
9
1
'''simple docstring''' import collections import os import re from pathlib import Path UpperCamelCase : Optional[Any] = 'src/transformers' # Matches is_xxx_available() UpperCamelCase : int = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} UpperCamelCase : int = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCamelCase : List[str] = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available UpperCamelCase : str = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") UpperCamelCase : Optional[Any] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCamelCase : Optional[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", UpperCamelCase : Optional[Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], UpperCamelCase : Any = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo UpperCamelCase : List[str] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: UpperCamelCase : Union[str, Any] = re.compile(r'^\s*try:') # Catches a line with else: UpperCamelCase : List[Any] = re.compile(r'^\s*else:') def A__ ( __lowerCAmelCase : Optional[int] ): if _re_test_backend.search(__lowerCAmelCase ) is None: return None lowerCamelCase__ = [b[0] for b in _re_backend.findall(__lowerCAmelCase )] backends.sort() return "_and_".join(__lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = 0 while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__lowerCAmelCase ): return None # First grab the objects without a specific backend in _import_structure lowerCamelCase__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: lowerCamelCase__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__lowerCAmelCase ): lowerCamelCase__ = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0] lowerCamelCase__ = re.findall(R"""\[([^\]]+)\]""" , __lowerCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue lowerCamelCase__ = _re_import_struct_key_value.search(__lowerCAmelCase ) if single_line_import_search is not None: lowerCamelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 lowerCamelCase__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCamelCase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCamelCase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): lowerCamelCase__ = lines[line_index] if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None: lowerCamelCase__ = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(""", """ ) lowerCamelCase__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_between_brackets.search(__lowerCAmelCase ) is not None: lowerCamelCase__ = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(""", """ ) lowerCamelCase__ = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0] objects.extend(__lowerCAmelCase ) elif _re_quote_object.search(__lowerCAmelCase ) is not None: objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 lowerCamelCase__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCamelCase__ = [] while ( line_index < len(__lowerCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCamelCase__ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(__lowerCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCamelCase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCamelCase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCamelCase__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : str ): def find_duplicates(__lowerCAmelCase : Any ): return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCamelCase__ = [] for key in import_dict_objects.keys(): lowerCamelCase__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) lowerCamelCase__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCamelCase__ = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def A__ ( ): lowerCamelCase__ = [] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: lowerCamelCase__ = os.path.join(__lowerCAmelCase , """__init__.py""" ) lowerCamelCase__ = parse_init(__lowerCAmelCase ) if objects is not None: lowerCamelCase__ = analyze_results(*__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(__lowerCAmelCase ) ) if len(__lowerCAmelCase ) > 0: raise ValueError("""\n\n""".join(__lowerCAmelCase ) ) def A__ ( ): lowerCamelCase__ = [] for path, directories, files in os.walk(__lowerCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(__lowerCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__lowerCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0: continue lowerCamelCase__ = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) ) lowerCamelCase__ = short_path.replace(os.path.sep , """.""" ) submodules.append(__lowerCAmelCase ) for fname in files: if fname == "__init__.py": continue lowerCamelCase__ = str((Path(__lowerCAmelCase ) / fname).relative_to(__lowerCAmelCase ) ) lowerCamelCase__ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if 
len(submodule.split(""".""" ) ) == 1: submodules.append(__lowerCAmelCase ) return submodules UpperCamelCase : Union[str, Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def A__ ( ): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import lowerCamelCase__ = direct_transformers_import(__lowerCAmelCase ) lowerCamelCase__ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" ) as f: lowerCamelCase__ = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __lowerCAmelCase ) ) ) lowerCamelCase__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
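A quick check of the backend detector, assuming the first helper in this file is bound as find_backend, which is the name its call sites inside parse_init use:

assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("from typing import TYPE_CHECKING") is None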
9
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
1
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def A__ ( __lowerCAmelCase : dict , __lowerCAmelCase : str , __lowerCAmelCase : set , __lowerCAmelCase : set , __lowerCAmelCase : dict , __lowerCAmelCase : dict , __lowerCAmelCase : PriorityQueue , __lowerCAmelCase : dict , __lowerCAmelCase : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue lowerCamelCase__ = cst_fwd.get(__lowerCAmelCase , np.inf ) lowerCamelCase__ = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) lowerCamelCase__ = new_cost_f lowerCamelCase__ = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: lowerCamelCase__ = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict , __lowerCAmelCase : dict ): lowerCamelCase__ = -1 lowerCamelCase__ = set() lowerCamelCase__ = set() lowerCamelCase__ = {source: 0} lowerCamelCase__ = {destination: 0} lowerCamelCase__ = {source: None} lowerCamelCase__ = {destination: None} lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = PriorityQueue() lowerCamelCase__ = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): lowerCamelCase__ , lowerCamelCase__ = queue_forward.get() visited_forward.add(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = queue_backward.get() visited_backward.add(__lowerCAmelCase ) lowerCamelCase__ = pass_and_relaxation( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) lowerCamelCase__ = pass_and_relaxation( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: lowerCamelCase__ = shortest_distance return shortest_path_distance UpperCamelCase : List[str] = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } UpperCamelCase : Dict = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
1
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase : List[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt') def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float , __lowerCAmelCase : int = 1_6000 ): lowerCamelCase__ = int(round(sample_rate * max_length ) ) if len(__lowerCAmelCase ) <= sample_length: return wav lowerCamelCase__ = randint(0 , len(__lowerCAmelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = field(default=a ,metadata={'help': 'Name of a dataset from the datasets package'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'A file containing the training audio paths and labels.'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'A file containing the validation audio paths and labels.'} ) _UpperCamelCase = field( default='train' ,metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' } ,) _UpperCamelCase = field( default='validation' ,metadata={ 'help': ( 'The name of the training data set split to use (via the datasets library). Defaults to \'validation\'' ) } ,) _UpperCamelCase = field( default='audio' ,metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} ,) _UpperCamelCase = field( default='label' ,metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} ) _UpperCamelCase = field( default=a ,metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } ,) _UpperCamelCase = field( default=a ,metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' 
) } ,) _UpperCamelCase = field( default=20 ,metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} ,) @dataclass class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = field( default='facebook/wav2vec2-base' ,metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ,) _UpperCamelCase = field( default=a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} ) _UpperCamelCase = field( default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,) _UpperCamelCase = field( default=a ,metadata={'help': 'Name or path of preprocessor config.'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} ) _UpperCamelCase = field( default=a ,metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } ,) _UpperCamelCase = field( default=a ,metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} ) _UpperCamelCase = field( default=a ,metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} ,) def UpperCamelCase_ ( self ): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" ,_lowerCAmelCase ,) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def A__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_audio_classification""" , __lowerCAmelCase , __lowerCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() lowerCamelCase__ = training_args.get_process_log_level() logger.setLevel(__lowerCAmelCase ) transformers.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. lowerCamelCase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. lowerCamelCase__ = DatasetDict() lowerCamelCase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) lowerCamelCase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ''' """Make sure to set `--audio_column_name` to the correct audio column - one of """ F'''{", ".join(raw_datasets["train"].column_names )}.''' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ''' """Make sure to set `--label_column_name` to the correct text column - one of """ F'''{", ".join(raw_datasets["train"].column_names )}.''' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy lowerCamelCase__ = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
lowerCamelCase__ = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) lowerCamelCase__ = feature_extractor.model_input_names[0] def train_transforms(__lowerCAmelCase : Any ): lowerCamelCase__ = [] for audio in batch[data_args.audio_column_name]: lowerCamelCase__ = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(__lowerCAmelCase ) lowerCamelCase__ = feature_extractor(__lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate ) lowerCamelCase__ = {model_input_name: inputs.get(__lowerCAmelCase )} lowerCamelCase__ = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(__lowerCAmelCase : Any ): lowerCamelCase__ = [audio["""array"""] for audio in batch[data_args.audio_column_name]] lowerCamelCase__ = feature_extractor(__lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate ) lowerCamelCase__ = {model_input_name: inputs.get(__lowerCAmelCase )} lowerCamelCase__ = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. lowerCamelCase__ = raw_datasets["""train"""].features[data_args.label_column_name].names lowerCamelCase__ , lowerCamelCase__ = {}, {} for i, label in enumerate(__lowerCAmelCase ): lowerCamelCase__ = str(__lowerCAmelCase ) lowerCamelCase__ = label # Load the accuracy metric from the datasets package lowerCamelCase__ = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(__lowerCAmelCase : List[Any] ): lowerCamelCase__ = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=__lowerCAmelCase , references=eval_pred.label_ids ) lowerCamelCase__ = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCamelCase__ = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: lowerCamelCase__ = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(__lowerCAmelCase , output_all_columns=__lowerCAmelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: lowerCamelCase__ = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(__lowerCAmelCase , output_all_columns=__lowerCAmelCase ) # Initialize our trainer lowerCamelCase__ = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) # Training if training_args.do_train: lowerCamelCase__ = None if training_args.resume_from_checkpoint is not None: lowerCamelCase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowerCamelCase__ = last_checkpoint lowerCamelCase__ = trainer.train(resume_from_checkpoint=__lowerCAmelCase ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowerCamelCase__ = trainer.evaluate() trainer.log_metrics("""eval""" , __lowerCAmelCase ) trainer.save_metrics("""eval""" , __lowerCAmelCase ) # Write model card and (optionally) push to hub lowerCamelCase__ = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**__lowerCAmelCase ) else: trainer.create_model_card(**__lowerCAmelCase ) if __name__ == "__main__": main()
9
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
9
1
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : int = { 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json', } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'bloom' _UpperCamelCase = ['past_key_values'] _UpperCamelCase = { 'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head', } def __init__( self ,_lowerCAmelCase=25_08_80 ,_lowerCAmelCase=64 ,_lowerCAmelCase=2 ,_lowerCAmelCase=8 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=1 ,_lowerCAmelCase=2 ,_lowerCAmelCase=False ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): lowerCamelCase__ = vocab_size # Backward compatibility with n_embed kwarg lowerCamelCase__ = kwargs.pop("""n_embed""" ,_lowerCAmelCase ) lowerCamelCase__ = hidden_size if n_embed is None else n_embed lowerCamelCase__ = n_layer lowerCamelCase__ = n_head lowerCamelCase__ = layer_norm_epsilon lowerCamelCase__ = initializer_range lowerCamelCase__ = use_cache lowerCamelCase__ = pretraining_tp lowerCamelCase__ = apply_residual_connection_post_layernorm lowerCamelCase__ = hidden_dropout lowerCamelCase__ = attention_dropout lowerCamelCase__ = bos_token_id lowerCamelCase__ = eos_token_id lowerCamelCase__ = slow_but_exact super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = version.parse('1.12' ) def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,): super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase ) if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ): # TODO: how to do that better? lowerCamelCase__ = 0 @property def UpperCamelCase_ ( self ): lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" ,inverted_values_shape=_lowerCAmelCase ) lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase_ ( self ): return self._config.n_layer @property def UpperCamelCase_ ( self ): return self._config.n_head @property def UpperCamelCase_ ( self ): return 1E-3 def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,): lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs( _lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase ) # We need to order the input in the way they appear in the forward() lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase__ = seqlen + 2 lowerCamelCase__ = self._config.hidden_size // self.num_attention_heads lowerCamelCase__ = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowerCamelCase__ = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowerCamelCase__ = [ (torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers ) ] lowerCamelCase__ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self ): return 13
9
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
9
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = DiTPipeline _UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCamelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } _UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCamelCase = False def UpperCamelCase_ ( self ): torch.manual_seed(0 ) lowerCamelCase__ = TransformeraDModel( sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=_lowerCAmelCase ,activation_fn="""gelu-approximate""" ,num_embeds_ada_norm=10_00 ,norm_type="""ada_norm_zero""" ,norm_elementwise_affine=_lowerCAmelCase ,) lowerCamelCase__ = AutoencoderKL() lowerCamelCase__ = DDIMScheduler() lowerCamelCase__ = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=0 ): if str(_lowerCAmelCase ).startswith("""mps""" ): lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase ) else: lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) lowerCamelCase__ = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = """cpu""" lowerCamelCase__ = self.get_dummy_components() lowerCamelCase__ = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowerCamelCase__ = self.get_dummy_inputs(_lowerCAmelCase ) lowerCamelCase__ = pipe(**_lowerCAmelCase ).images lowerCamelCase__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape ,(1, 16, 16, 3) ) lowerCamelCase__ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) lowerCamelCase__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCAmelCase ,1E-3 ) def UpperCamelCase_ ( self ): self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase ,expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def UpperCamelCase_ ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ): lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) lowerCamelCase__ = ["""vase""", """umbrella""", """white shark""", """white wolf"""] lowerCamelCase__ = pipe.get_label_ids(_lowerCAmelCase ) lowerCamelCase__ = 
pipe(_lowerCAmelCase ,generator=_lowerCAmelCase ,num_inference_steps=40 ,output_type="""np""" ).images for word, image in zip(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def UpperCamelCase_ ( self ): lowerCamelCase__ = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) lowerCamelCase__ = ["""vase""", """umbrella"""] lowerCamelCase__ = pipe.get_label_ids(_lowerCAmelCase ) lowerCamelCase__ = torch.manual_seed(0 ) lowerCamelCase__ = pipe(_lowerCAmelCase ,generator=_lowerCAmelCase ,num_inference_steps=25 ,output_type="""np""" ).images for word, image in zip(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
9
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
9
1
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' ,'False' ) ) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue_model_parallelism.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, ] ) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() ,encoding="""utf-8""" ,check=_lowerCAmelCase ,) assert hasattr(self ,"""env""" ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): # configuration for running training on smdistributed Model Parallel lowerCamelCase__ = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase__ = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' ,instance_count=_lowerCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=_lowerCAmelCase ,hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_00, } ,metric_definitions=self.env.metric_definitions ,distribution=_lowerCAmelCase ,py_version="""py36""" ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ): TrainingJobAnalytics(_lowerCAmelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): # create estimator lowerCamelCase__ = self.create_estimator(_lowerCAmelCase ) # run training estimator.fit() # result dataframe lowerCamelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in 
eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,_lowerCAmelCase )
9
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
9
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
9
1
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
9
1
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder UpperCamelCase : Dict = '__DUMMY_TRANSFORMERS_USER__' UpperCamelCase : Tuple = 'Dummy User' UpperCamelCase : Tuple = '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' UpperCamelCase : List[Any] = 'https://hub-ci.huggingface.co' UpperCamelCase : Optional[int] = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' UpperCamelCase : List[Any] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' UpperCamelCase : int = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def A__ ( __lowerCAmelCase : Any ): monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __lowerCAmelCase ) @pytest.fixture def A__ ( __lowerCAmelCase : List[str] ): monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __lowerCAmelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __lowerCAmelCase ) @pytest.fixture def A__ ( __lowerCAmelCase : List[str] ): monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __lowerCAmelCase ) @pytest.fixture def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): HfFolder.save_token(__lowerCAmelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def A__ ( ): return HfApi(endpoint=__lowerCAmelCase ) @pytest.fixture(scope="""session""" ) def A__ ( __lowerCAmelCase : HfApi ): lowerCamelCase__ = HfFolder.get_token() HfFolder.save_token(__lowerCAmelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__lowerCAmelCase ) @pytest.fixture def A__ ( __lowerCAmelCase : Tuple ): def _cleanup_repo(__lowerCAmelCase : Optional[int] ): hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def A__ ( __lowerCAmelCase : Any ): @contextmanager def _temporary_repo(__lowerCAmelCase : Optional[int] ): try: yield repo_id finally: cleanup_repo(__lowerCAmelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def A__ ( __lowerCAmelCase : HfApi , __lowerCAmelCase : int , __lowerCAmelCase : str ): lowerCamelCase__ = F'''repo_txt_data-{int(time.time() * 10e3 )}''' lowerCamelCase__ = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" , private=__lowerCAmelCase ) hf_api.upload_file( token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__lowerCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ): return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def A__ ( __lowerCAmelCase : HfApi , __lowerCAmelCase : Dict , __lowerCAmelCase : int ): lowerCamelCase__ = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' lowerCamelCase__ = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" , private=__lowerCAmelCase ) hf_api.upload_file( token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCAmelCase , repo_type="""dataset""" , ) yield repo_id 
try: hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ): return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def A__ ( __lowerCAmelCase : HfApi , __lowerCAmelCase : str , __lowerCAmelCase : Any ): lowerCamelCase__ = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' lowerCamelCase__ = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" , private=__lowerCAmelCase ) hf_api.upload_file( token=__lowerCAmelCase , path_or_fileobj=str(__lowerCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__lowerCAmelCase , token=__lowerCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ): return hf_private_dataset_repo_zipped_img_data_
9
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def A__ ( __lowerCAmelCase : Union[str, Any] ): if hor == 128: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") lowerCamelCase__ = (32, 64, 128, 256) lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) lowerCamelCase__ = model.state_dict() lowerCamelCase__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) lowerCamelCase__ = model lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowerCamelCase__ = state_dict.pop(__lowerCAmelCase ) hf_value_function.load_state_dict(__lowerCAmelCase ) torch.save(hf_value_function.state_dict() , 
"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
9
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self 
,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) class UpperCamelCase__ (metaclass=a ): '''simple docstring''' _UpperCamelCase = ['flax'] def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(self ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] ) @classmethod def UpperCamelCase_ ( cls ,*_lowerCAmelCase ,**_lowerCAmelCase ): requires_backends(cls ,["""flax"""] )
9
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
1
'''simple docstring''' import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,): lowerCamelCase__ = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } lowerCamelCase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCamelCase__ = token_dict["""token"""] lowerCamelCase__ = Tokenizer(Unigram() ) lowerCamelCase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ), normalizers.Lowercase(), ] ) lowerCamelCase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ), pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ) lowerCamelCase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,) lowerCamelCase__ = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [files] self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,): lowerCamelCase__ = trainers.UnigramTrainer( vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,) self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase ) self.add_unk_id() def UpperCamelCase_ ( self ): lowerCamelCase__ = json.loads(self._tokenizer.to_str() ) lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""] lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
'''simple docstring''' from __future__ import annotations import math def A__ ( __lowerCAmelCase : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) lowerCamelCase__ = [] for num in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = 0 while 2 * i * i <= odd_composites[num]: lowerCamelCase__ = odd_composites[num] - 2 * i * i if is_prime(__lowerCAmelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__lowerCAmelCase ) == n: return list_nums return [] def A__ ( ): return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
9
1
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig UpperCamelCase : Optional[int] = logging.get_logger(__name__) UpperCamelCase : Dict = 'T5Config' def A__ ( __lowerCAmelCase : jnp.array , __lowerCAmelCase : int , __lowerCAmelCase : int ): lowerCamelCase__ = jnp.zeros_like(__lowerCAmelCase ) lowerCamelCase__ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) lowerCamelCase__ = shifted_input_ids.at[:, 0].set(__lowerCAmelCase ) lowerCamelCase__ = jnp.where(shifted_input_ids == -100 , __lowerCAmelCase , __lowerCAmelCase ) return shifted_input_ids class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'mt5' _UpperCamelCase = MTaConfig class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'mt5' _UpperCamelCase = MTaConfig class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'mt5' _UpperCamelCase = MTaConfig
9
'''simple docstring''' def A__ ( ): return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(__lowerCAmelCase , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
9
1
'''simple docstring''' import re from filelock import FileLock try: import nltk UpperCamelCase : Dict = True except (ImportError, ModuleNotFoundError): UpperCamelCase : Dict = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def A__ ( __lowerCAmelCase : str ): re.sub("""<n>""" , """""" , __lowerCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__lowerCAmelCase ) )
9
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } UpperCamelCase : List[Any] = { 'camembert-base': 5_12, } UpperCamelCase : List[str] = '▁' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ['input_ids', 'attention_mask'] def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,) lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowerCAmelCase ) ) lowerCamelCase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} lowerCamelCase__ = len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] lowerCamelCase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): lowerCamelCase__ = [self.sep_token_id] lowerCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase_ ( self ): return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCamelCase_ ( self ): lowerCamelCase__ = 
{self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] lowerCamelCase__ = """""" lowerCamelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowerCAmelCase ) + token lowerCamelCase__ = True lowerCamelCase__ = [] else: current_sub_tokens.append(_lowerCAmelCase ) lowerCamelCase__ = False out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def __getstate__( self ): lowerCamelCase__ = self.__dict__.copy() lowerCamelCase__ = None return state def __setstate__( self ,_lowerCAmelCase ): lowerCamelCase__ = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCamelCase__ = {} lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase__ = os.path.join( _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase ,"""wb""" ) as fi: lowerCamelCase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
9
9
1
'''simple docstring''' import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : '''simple docstring''' @staticmethod def UpperCamelCase_ ( *_lowerCAmelCase ,**_lowerCAmelCase ): pass @is_pipeline_test @require_torch @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' _UpperCamelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) lowerCamelCase__ = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = vqa_pipeline(_lowerCAmelCase ,top_k=1 ) self.assertEqual( _lowerCAmelCase ,[ [{"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase )}], [{"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase )}], ] ,) @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) lowerCamelCase__ = """./tests/fixtures/tests_samples/COCO/000000039769.png""" lowerCamelCase__ = """How many cats are there?""" lowerCamelCase__ = vqa_pipeline(image=_lowerCAmelCase ,question="""How many cats are there?""" ,top_k=2 ) self.assertEqual( _lowerCAmelCase ,[{"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase )}, {"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase )}] ) lowerCamelCase__ = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( _lowerCAmelCase ,[{"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase )}, {"""score""": ANY(_lowerCAmelCase ), """answer""": ANY(_lowerCAmelCase )}] ) @slow @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = pipeline("""visual-question-answering""" ,model="""dandelin/vilt-b32-finetuned-vqa""" ) lowerCamelCase__ = """./tests/fixtures/tests_samples/COCO/000000039769.png""" lowerCamelCase__ = """How many cats are there?""" lowerCamelCase__ = vqa_pipeline(image=_lowerCAmelCase ,question=_lowerCAmelCase ,top_k=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) lowerCamelCase__ = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) lowerCamelCase__ = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase ,decimals=4 ) ,[[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 ,) @require_tf @unittest.skip("""Visual question answering not 
implemented in TF""" ) def UpperCamelCase_ ( self ): pass
9
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = R"""\w+[.]\d+""" lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase ) for pat in pats: lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) ) return key def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ): # Step 1: Convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = rename_key(__lowerCAmelCase ) lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase )
9
1
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) UpperCamelCase : List[Any] = logging.getLogger(__name__) @dataclass(frozen=a ) class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = 42 _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None @dataclass(frozen=a ) class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 42 def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase=False ,_lowerCAmelCase = False ,): lowerCamelCase__ = hans_processors[task]() lowerCamelCase__ = os.path.join( _lowerCAmelCase ,"""cached_{}_{}_{}_{}""".format( """dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(_lowerCAmelCase ) ,_lowerCAmelCase ,) ,) lowerCamelCase__ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1] lowerCamelCase__ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ = cached_features_file + """.lock""" with FileLock(_lowerCAmelCase ): if os.path.exists(_lowerCAmelCase ) and not overwrite_cache: logger.info(F'''Loading features from cached file {cached_features_file}''' ) lowerCamelCase__ = torch.load(_lowerCAmelCase ) else: logger.info(F'''Creating features from dataset file at {data_dir}''' ) lowerCamelCase__ = ( processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase ) ) logger.info("""Training examples: %s""" ,len(_lowerCAmelCase ) ) lowerCamelCase__ = hans_convert_examples_to_features(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) logger.info("""Saving features into cached file %s""" ,_lowerCAmelCase ) torch.save(self.features ,_lowerCAmelCase ) def __len__( self ): return len(self.features ) def __getitem__( self ,_lowerCAmelCase ): return self.features[i] def UpperCamelCase_ ( self ): return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = 42 def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = 1_28 ,_lowerCAmelCase=False ,_lowerCAmelCase = False ,): lowerCamelCase__ = hans_processors[task]() lowerCamelCase__ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1] lowerCamelCase__ = label_list lowerCamelCase__ = processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase ) lowerCamelCase__ = hans_convert_examples_to_features(_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ): if ex_index % 1_00_00 == 0: logger.info("""Writing example %d of %d""" % (ex_index, len(_lowerCAmelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) lowerCamelCase__ = tf.data.Dataset.from_generator( _lowerCAmelCase ,( { """example_id""": tf.intaa, """input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa, }, tf.intaa, ) ,( { """example_id""": tf.TensorShape([] ), """input_ids""": tf.TensorShape([None, None] ), """attention_mask""": tf.TensorShape([None, None] ), """token_type_ids""": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) ,) def UpperCamelCase_ ( self ): return self.dataset def __len__( self ): return len(self.features ) def __getitem__( self ,_lowerCAmelCase ): return self.features[i] def UpperCamelCase_ ( self ): return self.label_list class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase ,"""heuristics_train_set.txt""" ) ) ,"""train""" ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self._create_examples(self._read_tsv(os.path.join(_lowerCAmelCase ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" ) def UpperCamelCase_ ( self ): return ["contradiction", "entailment", "neutral"] def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = [] for i, line in enumerate(_lowerCAmelCase ): if i == 0: continue lowerCamelCase__ = """%s-%s""" % (set_type, line[0]) lowerCamelCase__ = line[5] lowerCamelCase__ = line[6] lowerCamelCase__ = line[7][2:] if line[7].startswith("""ex""" ) else line[7] lowerCamelCase__ = line[0] examples.append(InputExample(guid=_lowerCAmelCase ,text_a=_lowerCAmelCase ,text_b=_lowerCAmelCase ,label=_lowerCAmelCase ,pairID=_lowerCAmelCase ) ) return examples def A__ ( __lowerCAmelCase : List[InputExample] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : PreTrainedTokenizer , ): lowerCamelCase__ = {label: i for i, label in enumerate(__lowerCAmelCase )} lowerCamelCase__ = [] for ex_index, example in tqdm.tqdm(enumerate(__lowerCAmelCase ) , desc="""convert examples to features""" ): if ex_index % 1_0000 == 0: logger.info("""Writing example %d""" % (ex_index) ) lowerCamelCase__ = tokenizer( example.text_a , example.text_b , add_special_tokens=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , truncation=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , ) lowerCamelCase__ = label_map[example.label] if example.label in label_map else 0 lowerCamelCase__ = int(example.pairID ) features.append(InputFeatures(**__lowerCAmelCase , label=__lowerCAmelCase , pairID=__lowerCAmelCase ) ) for i, example in enumerate(examples[:5] ): logger.info("""*** Example ***""" ) logger.info(F'''guid: {example}''' ) logger.info(F'''features: {features[i]}''' ) return features UpperCamelCase : List[Any] = { 'hans': 3, } UpperCamelCase : Tuple = { 'hans': HansProcessor, }
9
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
1
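# A hedged, self-contained usage sketch of the benchmark utilities exercised by the
# tests above (this API exists in transformers, though it has since been deprecated);
# the model name and sizes mirror the test fixtures rather than a recommendation.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # tiny model used throughout the tests
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()  # exposes time_inference_result / memory_inference_result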
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCamelCase__ : '''simple docstring''' _UpperCamelCase = 42 _UpperCamelCase = None @staticmethod def UpperCamelCase_ ( ): raise NotImplementedError def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): raise NotImplementedError def UpperCamelCase_ ( self ,_lowerCAmelCase ): raise NotImplementedError def UpperCamelCase_ ( self ): if not self.is_available(): raise RuntimeError( F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def UpperCamelCase_ ( cls ): return F'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'optuna' @staticmethod def UpperCamelCase_ ( ): return is_optuna_available() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): return run_hp_search_optuna(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return default_hp_space_optuna(_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'ray' _UpperCamelCase = '\'ray[tune]\'' @staticmethod def UpperCamelCase_ ( ): return is_ray_available() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): return run_hp_search_ray(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return default_hp_space_ray(_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'sigopt' @staticmethod def UpperCamelCase_ ( ): return is_sigopt_available() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): return run_hp_search_sigopt(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return default_hp_space_sigopt(_lowerCAmelCase ) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'wandb' @staticmethod def UpperCamelCase_ ( ): return is_wandb_available() def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ): return run_hp_search_wandb(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return default_hp_space_wandb(_lowerCAmelCase ) UpperCamelCase : Union[str, Any] = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def A__ ( ): lowerCamelCase__ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = available_backends[0].name if len(__lowerCAmelCase ) > 1: logger.info( F'''{len(__lowerCAmelCase )} hyperparameter search backends available. 
Using {name} as the default.''' ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( F''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
9
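# Hedged sketch of how the registry above is consumed in practice: `Trainer` exposes
# `hyperparameter_search`, which resolves one of the registered backends by name.
# `trainer` is a placeholder object, not defined in this file.
#
#     best_run = trainer.hyperparameter_search(
#         backend="optuna",       # or "ray", "sigopt", "wandb"
#         direction="minimize",
#         n_trials=10,
#     )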
'''simple docstring''' from math import factorial UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length lowerCamelCase__ = 0 # the cached sizes of the previous chains lowerCamelCase__ = {} for start_chain_element in range(1 , __lowerCAmelCase ): # The temporary set will contain the elements of the chain lowerCamelCase__ = set() lowerCamelCase__ = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. lowerCamelCase__ = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__lowerCAmelCase ) chain_set_length += 1 lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowerCamelCase__ = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
9
1
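# A self-contained re-statement of the digit-factorial step used above, with the
# well-known fixed point and 3-cycle as checks: 145 maps to itself
# (1! + 4! + 5! = 145) and 169 -> 363601 -> 1454 -> 169 is a loop of length 3.
from math import factorial

def digit_factorial_sum_example(number: int) -> int:
    return sum(factorial(int(digit)) for digit in str(number))

assert digit_factorial_sum_example(145) == 145
assert digit_factorial_sum_example(169) == 363601
assert digit_factorial_sum_example(363601) == 1454
assert digit_factorial_sum_example(1454) == 169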
'''simple docstring''' def A__ ( __lowerCAmelCase : str ): return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
9
1
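# Hedged illustration of create_dummy_object above: a lowercase name selects the
# function template, so create_dummy_object("load_image", '["torch"]') would render
# roughly as
#
#     def load_image(*args, **kwargs):
#         requires_backends(load_image, ["torch"])
#
# while an all-uppercase name renders the constant template and anything else
# renders the class template.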
'''simple docstring''' import os import posixpath import shutil import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCamelCase : List[str] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase__ (datasets.BuilderConfig ): '''simple docstring''' _UpperCamelCase = None def A__ ( __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : List[int] , ): import pyspark def generate_fn(): lowerCamelCase__ = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: lowerCamelCase__ = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" ) lowerCamelCase__ = partition_df.collect() lowerCamelCase__ = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class UpperCamelCase__ (_BaseExamplesIterable ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=None ,): lowerCamelCase__ = df lowerCamelCase__ = partition_order or range(self.df.rdd.getNumPartitions() ) lowerCamelCase__ = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_lowerCAmelCase ) return SparkExamplesIterable(self.df ,partition_order=_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.split_shard_indices_by_worker(_lowerCAmelCase ,_lowerCAmelCase ) return SparkExamplesIterable(self.df ,partition_order=_lowerCAmelCase ) @property def UpperCamelCase_ ( self ): return len(self.partition_order ) class UpperCamelCase__ (datasets.DatasetBuilder ): '''simple docstring''' _UpperCamelCase = SparkConfig def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): import pyspark lowerCamelCase__ = pyspark.sql.SparkSession.builder.getOrCreate() lowerCamelCase__ = df lowerCamelCase__ = working_dir super().__init__( cache_dir=_lowerCAmelCase ,config_name=str(self.df.semanticHash() ) ,**_lowerCAmelCase ,) def UpperCamelCase_ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(_lowerCAmelCase ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=_lowerCAmelCase ) lowerCamelCase__ = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuid4().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_lowerCAmelCase ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: lowerCamelCase__ = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_lowerCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def UpperCamelCase_ ( self ): return datasets.DatasetInfo(features=self.config.features ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def UpperCamelCase_ ( self ,_lowerCAmelCase ): import pyspark def get_arrow_batch_size(_lowerCAmelCase ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) lowerCamelCase__ = self.df.count() lowerCamelCase__ = df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCamelCase__ = ( self.df.limit(_lowerCAmelCase ) .repartition(1 ) .mapInArrow(_lowerCAmelCase ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCamelCase__ = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCamelCase__ = min(_lowerCAmelCase ,int(approx_total_size / max_shard_size ) ) lowerCamelCase__ = self.df.repartition(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): import pyspark lowerCamelCase__ = ParquetWriter if file_format == """parquet""" else ArrowWriter lowerCamelCase__ = os.path.join(self._working_dir ,os.path.basename(_lowerCAmelCase ) ) if self._working_dir else fpath lowerCamelCase__ = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowerCamelCase__ = self.config.features lowerCamelCase__ = self._writer_batch_size lowerCamelCase__ = self._fs.storage_options def write_arrow(_lowerCAmelCase ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCamelCase__ = pyspark.TaskContext().taskAttemptId() lowerCamelCase__ = next(_lowerCAmelCase ,_lowerCAmelCase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) lowerCamelCase__ = 0 lowerCamelCase__ = writer_class( features=_lowerCAmelCase ,path=working_fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,writer_batch_size=_lowerCAmelCase ,storage_options=_lowerCAmelCase ,embed_local_files=_lowerCAmelCase ,) lowerCamelCase__ = pa.Table.from_batches([first_batch] ) writer.write_table(_lowerCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCamelCase__ , lowerCamelCase__ = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 lowerCamelCase__ = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,writer_batch_size=_lowerCAmelCase ,storage_options=_lowerCAmelCase ,embed_local_files=_lowerCAmelCase ,) lowerCamelCase__ = pa.Table.from_batches([batch] ) writer.write_table(_lowerCAmelCase ) if writer._num_bytes > 0: lowerCamelCase__ , lowerCamelCase__ = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_lowerCAmelCase ) ): lowerCamelCase__ = os.path.join(os.path.dirname(_lowerCAmelCase ) ,os.path.basename(_lowerCAmelCase ) ) shutil.move(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = ( self.df.mapInArrow(_lowerCAmelCase ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = "arrow" ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,): self._validate_cache_dir() lowerCamelCase__ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_lowerCAmelCase ) lowerCamelCase__ = not is_remote_filesystem(self._fs ) lowerCamelCase__ = os.path.join if is_local else posixpath.join lowerCamelCase__ = """-TTTTT-SSSSS-of-NNNNN""" lowerCamelCase__ = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' lowerCamelCase__ = path_join(self._output_dir ,_lowerCAmelCase ) lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = [] lowerCamelCase__ = [] for task_id, content in self._prepare_split_single(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_lowerCAmelCase ) lowerCamelCase__ = total_num_examples lowerCamelCase__ = total_num_bytes # should rename everything at the end logger.debug(F'''Renaming {total_shards} shards.''' ) if 
total_shards > 1: lowerCamelCase__ = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. lowerCamelCase__ = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,): rename( _lowerCAmelCase ,fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,fpath.replace("""TTTTT-SSSSS""" ,F'''{global_shard_id:05d}''' ).replace("""NNNNN""" ,F'''{total_shards:05d}''' ) ,) lowerCamelCase__ = [] lowerCamelCase__ = 0 for i in range(len(_lowerCAmelCase ) ): lowerCamelCase__ , lowerCamelCase__ = task_id_and_num_shards[i] for shard_id in range(_lowerCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_lowerCAmelCase ,len(_lowerCAmelCase ) ).map(lambda _lowerCAmelCase : _rename_shard(*_lowerCAmelCase ) ).collect() else: # don't use any pattern lowerCamelCase__ = 0 lowerCamelCase__ = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,F'''{shard_id:05d}''' ).replace("""TTTTT""" ,F'''{task_id:05d}''' ) ,fpath.replace(_lowerCAmelCase ,"""""" ) ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,): return SparkExamplesIterable(self.df )
9
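# Hedged sketch of the public entry point that drives the Spark builder above
# (`Dataset.from_spark` is available in `datasets` >= 2.12); assumes a local
# SparkSession and a toy two-row DataFrame.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], schema="text string")
ds = Dataset.from_spark(df)  # materializes the DataFrame through the Spark builder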
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
1
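# Hedged real-world counterpart of the fixture-based tests above, using the same
# checkpoint name the integration test references; requires `sentencepiece`.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Det är inget fel på Mr. Cool")["input_ids"]
text = tokenizer.decode(ids)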
'''simple docstring''' # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCamelCase : List[Any] = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') UpperCamelCase : List[Any] = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split() UpperCamelCase : List[Any] = '|'.join(sys.argv[1:]) UpperCamelCase : str = re.compile(rF'^({joined_dirs}).*?\.py$') UpperCamelCase : Any = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
9
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
9
1
'''simple docstring''' import argparse from collections import defaultdict import yaml UpperCamelCase : Optional[int] = 'docs/source/en/_toctree.yml' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = defaultdict(__lowerCAmelCase ) for doc in model_doc: counts[doc["local"]] += 1 lowerCamelCase__ = [key for key, value in counts.items() if value > 1] lowerCamelCase__ = [] for duplicate_key in duplicates: lowerCamelCase__ = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} ) if len(__lowerCAmelCase ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] ) # Sort return sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : s["title"].lower() ) def A__ ( __lowerCAmelCase : Dict=False ): with open(__lowerCAmelCase , encoding="""utf-8""" ) as f: lowerCamelCase__ = yaml.safe_load(f.read() ) # Get to the API doc lowerCamelCase__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCamelCase__ = content[api_idx]["""sections"""] # Then to the model doc lowerCamelCase__ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowerCamelCase__ = api_doc[model_idx]["""sections"""] lowerCamelCase__ = [(idx, section) for idx, section in enumerate(__lowerCAmelCase ) if """sections""" in section] lowerCamelCase__ = False for idx, modality_doc in modalities_docs: lowerCamelCase__ = modality_doc["""sections"""] lowerCamelCase__ = clean_model_doc_toc(__lowerCAmelCase ) if old_modality_doc != new_modality_doc: lowerCamelCase__ = True if overwrite: lowerCamelCase__ = new_modality_doc if diff: if overwrite: lowerCamelCase__ = model_doc lowerCamelCase__ = api_doc with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__lowerCAmelCase , allow_unicode=__lowerCAmelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : str = parser.parse_args() check_model_doc(args.fix_and_overwrite)
9
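# Hedged illustration of clean_model_doc_toc above: duplicate "local" keys carrying a
# single distinct title collapse to one entry, and the result is sorted by title, e.g.
#
#     [{"local": "model_doc/bert", "title": "BERT"},
#      {"local": "model_doc/albert", "title": "ALBERT"},
#      {"local": "model_doc/bert", "title": "BERT"}]
#
# becomes
#
#     [{"local": "model_doc/albert", "title": "ALBERT"},
#      {"local": "model_doc/bert", "title": "BERT"}]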
'''simple docstring''' UpperCamelCase : Tuple = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution UpperCamelCase : list[bool | None] = [None] * 10_00_00_00 UpperCamelCase : Tuple = True UpperCamelCase : Optional[int] = False def A__ ( __lowerCAmelCase : int ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase__ = chain(next_number(__lowerCAmelCase ) ) lowerCamelCase__ = number_chain while number < 1000_0000: lowerCamelCase__ = number_chain number *= 10 return number_chain def A__ ( __lowerCAmelCase : int = 1000_0000 ): for i in range(1 , __lowerCAmelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution() = }')
9
1
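# Self-contained re-statement of the squared-digit step used above: every chain
# eventually reaches 1 or 89, e.g. 44 -> 32 -> 13 -> 10 -> 1.
def next_number_example(number: int) -> int:
    return sum(int(digit) ** 2 for digit in str(number))

assert next_number_example(44) == 32   # 16 + 16
assert next_number_example(32) == 13   # 9 + 4
assert next_number_example(13) == 10   # 1 + 9
assert next_number_example(10) == 1    # chain ends at 1
assert next_number_example(85) == 89   # 64 + 25, chain ends at 89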
'''simple docstring''' # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( 'pipelines_utils', '0.22.0', 'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.', standard_warn=False, stacklevel=3, )
9
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : List[str] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'donut-swin' _UpperCamelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = depths lowerCamelCase__ = len(_lowerCAmelCase ) lowerCamelCase__ = num_heads lowerCamelCase__ = window_size lowerCamelCase__ = mlp_ratio lowerCamelCase__ = qkv_bias lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = hidden_act lowerCamelCase__ = use_absolute_embeddings lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
9
1
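# Hedged usage sketch for the config above (requires torch): with the defaults, the
# derived hidden_size is embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768.
from transformers import DonutSwinConfig, DonutSwinModel

config = DonutSwinConfig()
assert config.hidden_size == 768
model = DonutSwinModel(config)  # randomly initialised encoder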
'''simple docstring''' def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise ValueError("""iterations must be defined as integers""" ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not number >= 1: raise ValueError( """starting number must be an integer and be more than 0""" ) if not iterations >= 1: raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" ) lowerCamelCase__ = """""" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__lowerCAmelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
9
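# Expected behaviour of the FizzBuzz helper above for the first 15 numbers (a
# hedged, doctest-style note; the function appends a trailing space per number):
#
#     "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "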
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCamelCase : Optional[Any] = ['small', 'medium', 'large'] UpperCamelCase : Dict = 'lm_head.decoder.weight' UpperCamelCase : int = 'lm_head.weight' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = torch.load(__lowerCAmelCase ) lowerCamelCase__ = d.pop(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCamelCase : Dict = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl') UpperCamelCase : str = F'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
9
1
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
9
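# Hedged worked step for the setup above: with the initial parameters [2, 4, 1, 5],
# the intended hypothesis on the first training example (5, 2, 3) evaluates to
# 2 + 4*5 + 1*2 + 5*3 = 39, so the initial training error is 39 - 15 = 24.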
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
1
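Every ViT-MAE test above derives its noise tensor from the model's patch grid. A minimal sketch of that computation, with illustrative config values rather than ones taken from the tester:

import numpy as np

# ViT-MAE splits the image into (image_size // patch_size) ** 2 patches; the
# noise tensor holds one value per patch and drives the random masking.
image_size, patch_size, batch_size = 224, 16, 2  # illustrative values
num_patches = (image_size // patch_size) ** 2    # 196 patches for 224 / 16

np.random.seed(2)  # the same seeding trick the tests use for a reproducible mask
noise = np.random.uniform(size=(batch_size, num_patches))
print(noise.shape)  # (2, 196)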
'''simple docstring''' # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file UpperCamelCase : int = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.' def A__ ( __lowerCAmelCase : List[str]=None ): if subparsers is not None: lowerCamelCase__ = subparsers.add_parser("""tpu-config""" , description=_description ) else: lowerCamelCase__ = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description ) # Core arguments lowerCamelCase__ = parser.add_argument_group( """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" ) config_args.add_argument( """--config_file""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""Path to the config file to use for accelerate.""" , ) config_args.add_argument( """--tpu_name""" , default=__lowerCAmelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , ) config_args.add_argument( """--tpu_zone""" , default=__lowerCAmelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , ) lowerCamelCase__ = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" ) pod_args.add_argument( """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , ) pod_args.add_argument( """--command_file""" , default=__lowerCAmelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , ) pod_args.add_argument( """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , ) pod_args.add_argument( """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , ) pod_args.add_argument( """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , ) pod_args.add_argument( """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" ) if subparsers is not None: parser.set_defaults(func=__lowerCAmelCase ) return parser def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = None # Get the default from the config file if it exists. 
if args.config_file is not None or os.path.isfile(__lowerCAmelCase ): lowerCamelCase__ = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: lowerCamelCase__ = defaults.command_file if not args.command and defaults.commands is not None: lowerCamelCase__ = defaults.commands if not args.tpu_name: lowerCamelCase__ = defaults.tpu_name if not args.tpu_zone: lowerCamelCase__ = defaults.tpu_zone if args.accelerate_version == "dev": lowerCamelCase__ = """git+https://github.com/huggingface/accelerate.git""" elif args.accelerate_version == "latest": lowerCamelCase__ = """accelerate -U""" elif isinstance(parse(args.accelerate_version ) , __lowerCAmelCase ): lowerCamelCase__ = F'''accelerate=={args.accelerate_version}''' if not args.command_file and not args.command: raise ValueError("""You must specify either a command file or a command to run on the pod.""" ) if args.command_file: with open(args.command_file , """r""" ) as f: lowerCamelCase__ = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , __lowerCAmelCase ): lowerCamelCase__ = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate lowerCamelCase__ = ["""cd /usr/share"""] if args.install_accelerate: new_cmd += [F'''pip install {args.accelerate_version}'''] new_cmd += args.command lowerCamelCase__ = """; """.join(__lowerCAmelCase ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess lowerCamelCase__ = ["""gcloud"""] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F'''Running {" ".join(__lowerCAmelCase )}''' ) return subprocess.run(__lowerCAmelCase ) print("""Successfully setup pod.""" ) def A__ ( ): lowerCamelCase__ = tpu_command_parser() lowerCamelCase__ = parser.parse_args() tpu_command_launcher(__lowerCAmelCase )
9
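The launcher above ultimately joins the startup commands with "; " and ships the result over gcloud ssh. A standalone sketch of that assembly step (the TPU name and zone are placeholders):

# Mirrors how the tpu-config launcher builds its gcloud invocation.
commands = ["cd /usr/share", "pip install accelerate -U", "accelerate launch train.py"]
command_str = "; ".join(commands)

cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",  # placeholder name
    "--zone", "us-central1-a",                               # placeholder zone
    "--command", command_str,
    "--worker", "all",
]
print(" ".join(cmd))  # what the --debug flag prints instead of running the command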
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
1
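For reference, a short usage sketch of the image processor exercised above; the sizes mirror the tester's defaults and the random image is just a stand-in:

import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(255, size=(30, 30, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])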
'''simple docstring''' from math import factorial UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def A__ ( __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameter number must be int""" ) if number < 0: raise ValueError("""Parameter number must be greater than or equal to 0""" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCAmelCase ) ) def A__ ( __lowerCAmelCase : int = 60 , __lowerCAmelCase : int = 100_0000 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Parameters chain_length and number_limit must be int""" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( """Parameters chain_length and number_limit must be greater than 0""" ) # the counter for the chains with the exact desired length lowerCamelCase__ = 0 # the cached sizes of the previous chains lowerCamelCase__ = {} for start_chain_element in range(1 , __lowerCAmelCase ): # The temporary set will contain the elements of the chain lowerCamelCase__ = set() lowerCamelCase__ = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. lowerCamelCase__ = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__lowerCAmelCase ) chain_set_length += 1 lowerCamelCase__ = digit_factorial_sum(__lowerCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowerCamelCase__ = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
9
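A worked example of the chain logic above: starting from 169, the sum-of-digit-factorials sequence cycles with period three, so its chain length is 3.

from math import factorial

def digit_factorial_sum(n: int) -> int:
    return sum(factorial(int(d)) for d in str(n))

chain, n = [], 169
while n not in chain:        # stop once the sequence starts repeating
    chain.append(n)
    n = digit_factorial_sum(n)
print(chain)                 # [169, 363601, 1454] -- then back to 169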
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
9
1
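To make the hypothesis computation above concrete: with parameter vector [theta0, theta1, theta2, theta3], the prediction for an input triple is theta0 plus the dot product of the remaining weights with the features.

# Hypothesis value for the first training example, mirroring _hypothesis_value.
parameter_vector = [2, 4, 1, 5]   # [theta0, theta1, theta2, theta3]
data_input = (5, 2, 3)            # features of the first training tuple

hyp = parameter_vector[0] + sum(p * x for p, x in zip(parameter_vector[1:], data_input))
print(hyp)  # 2 + 4*5 + 1*2 + 5*3 = 39, against the target output 15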
'''simple docstring''' import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def A__ ( *__lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = list(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): lowerCamelCase__ = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def A__ ( __lowerCAmelCase : Exception ): lowerCamelCase__ = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def A__ ( __lowerCAmelCase : callable = None , __lowerCAmelCase : int = 128 ): if function is None: return functools.partial(__lowerCAmelCase , starting_batch_size=__lowerCAmelCase ) lowerCamelCase__ = starting_batch_size def decorator(*__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : int ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() lowerCamelCase__ = list(inspect.signature(__lowerCAmelCase ).parameters.keys() ) # Guard against user error if len(__lowerCAmelCase ) < (len(__lowerCAmelCase ) + 1): lowerCamelCase__ = """, """.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( F'''Batch size was passed into `{function.__name__}` as the first argument when called.''' F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ) except Exception as e: if should_reduce_batch_size(__lowerCAmelCase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
9
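The decorator above wraps a training function whose first argument is the batch size and retries with half the batch on an out-of-memory error. A minimal usage sketch with a placeholder training body:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # ... build dataloaders and run the training loop with `batch_size` here ...
    # a CUDA/XPU/NPU OOM raised in this body makes the decorator retry with
    # batch_size // 2, down to a RuntimeError once it reaches zero
    print(f"training with batch_size={batch_size}")

train()  # the decorator supplies batch_size itself; do not pass it in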
'''simple docstring''' import argparse from omegaconf import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
9
1
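Since the converter above is a command-line script, here is a hedged sketch of driving the same conversion directly from Python; the script name and all three paths are placeholders, and the call mirrors the script's own __main__ block:

# Equivalent to:
#   python convert_ldm.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ldm-diffusers
convert_ldm_original(
    "model.ckpt",     # original LDM checkpoint (placeholder path)
    "config.yaml",    # OmegaConf model config (placeholder path)
    "ldm-diffusers",  # output directory for the saved LDMPipeline
)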
'''simple docstring''' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError("""String lengths must match!""" ) lowerCamelCase__ = 0 for chara, chara in zip(__lowerCAmelCase , __lowerCAmelCase ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
9
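A worked example of the distance function above: "karolin" and "kathrin" differ in exactly three positions.

a, b = "karolin", "kathrin"
# mismatches: 'r' vs 't', 'o' vs 'h', 'l' vs 'r'
distance = sum(ca != cb for ca, cb in zip(a, b))
print(distance)  # 3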
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
9
1
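The decryption above rests on XOR being its own inverse: applying the same key byte twice recovers the plaintext. A tiny demonstration with a made-up three-letter key (not the puzzle's real key):

from itertools import cycle

key = [ord(c) for c in "god"]    # illustrative key only
plain = "the quick brown fox"
cipher = [ord(c) ^ k for c, k in zip(plain, cycle(key))]
decoded = "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key)))
assert decoded == plain          # XOR twice with the same key is the identity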
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,): lowerCamelCase__ = parent lowerCamelCase__ = 13 lowerCamelCase__ = 7 lowerCamelCase__ = 30 lowerCamelCase__ = self.seq_length + self.mem_len lowerCamelCase__ = 15 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = 99 lowerCamelCase__ = [10, 50, 80] lowerCamelCase__ = 32 lowerCamelCase__ = 32 lowerCamelCase__ = 4 lowerCamelCase__ = 8 lowerCamelCase__ = 1_28 lowerCamelCase__ = 2 lowerCamelCase__ = 2 lowerCamelCase__ = None lowerCamelCase__ = 1 lowerCamelCase__ = 0 lowerCamelCase__ = 3 lowerCamelCase__ = self.vocab_size - 1 lowerCamelCase__ = 0.01 def UpperCamelCase_ ( self ): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCamelCase__ = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCamelCase_ ( self ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFTransfoXLModel(_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = model(_lowerCAmelCase ).to_tuple() lowerCamelCase__ = {"""input_ids""": input_ids_a, """mems""": mems_a} lowerCamelCase__ , lowerCamelCase__ = model(_lowerCAmelCase ).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFTransfoXLLMHeadModel(_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = model(_lowerCAmelCase ).to_tuple() lowerCamelCase__ = {"""input_ids""": input_ids_a, """labels""": lm_labels} lowerCamelCase__ , lowerCamelCase__ = model(_lowerCAmelCase ).to_tuple() lowerCamelCase__ , lowerCamelCase__ = model([input_ids_a, mems_a] ).to_tuple() lowerCamelCase__ = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} 
lowerCamelCase__ , lowerCamelCase__ = model(_lowerCAmelCase ).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFTransfoXLForSequenceClassification(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) _UpperCamelCase = () if is_tf_available() else () _UpperCamelCase = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFTransfoXLModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,d_embed=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): self.model_tester.set_seed() lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.model_tester.set_seed() lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowerCamelCase__ = model.get_output_embeddings() assert isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) lowerCamelCase__ = model.get_bias() assert name is None else: lowerCamelCase__ = model.get_output_embeddings() assert x is None lowerCamelCase__ = model.get_bias() assert name is None def UpperCamelCase_ ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def UpperCamelCase_ ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = TFTransfoXLModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def UpperCamelCase_ ( self ): pass @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off lowerCamelCase__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . 
Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowerCamelCase__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowerCamelCase__ = model.generate(_lowerCAmelCase ,max_length=2_00 ,do_sample=_lowerCAmelCase ) self.assertListEqual(output_ids[0].numpy().tolist() ,_lowerCAmelCase )
9
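The shape assertions in the tests above follow from Transformer-XL's segment memory: every layer returns a mems tensor of shape (mem_len, batch_size, hidden_size) that is fed back on the next segment. With the tester's values:

# Expected mems shapes checked by assertListEqual above.
mem_len, batch_size, hidden_size, num_hidden_layers = 30, 13, 32, 2
expected = [(mem_len, batch_size, hidden_size)] * num_hidden_layers
print(expected)  # [(30, 13, 32), (30, 13, 32)]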
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
9
1
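The preprocessing step above pads the message with a lone 0x80 byte, zeros, and the 64-bit big-endian bit length, so that every block is exactly 64 bytes. A quick check of that invariant:

import struct

data = b"Test String"
padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
length = struct.pack(">Q", len(data) * 8)   # bit length, big-endian
padded = data + padding + length
assert len(padded) % 64 == 0                # a whole number of 512-bit blocks
print(len(padded))                          # 64 for this 11-byte message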
'''simple docstring''' import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=64 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=[1, 16, 4, 4] ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = scope lowerCamelCase__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size lowerCamelCase__ = (self.image_size // 32) ** 2 lowerCamelCase__ = num_patches + 1 def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): lowerCamelCase__ = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [4, 8, 16, 32], """num_groups""": 2, } return ViTHybridConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,backbone_featmap_shape=self.backbone_featmap_shape ,backbone_config=_lowerCAmelCase ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): 
lowerCamelCase__ = ViTHybridModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = self.type_sequence_label_size lowerCamelCase__ = ViTHybridForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowerCamelCase__ = model(_lowerCAmelCase ,labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () _UpperCamelCase = ( {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = ViTHybridModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,nn.Linear ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = _config_zero_init(_lowerCAmelCase ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(config=_lowerCAmelCase ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": lowerCamelCase__ = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 
1E9).item() ,[0.0, 1.0] ,msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @slow def UpperCamelCase_ ( self ): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = ViTHybridModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( _lowerCAmelCase ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase__ = model(**_lowerCAmelCase ) # verify the logits lowerCamelCase__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCAmelCase ,atol=1E-4 ) ) @slow @require_accelerate def UpperCamelCase_ ( self ): lowerCamelCase__ = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" ) lowerCamelCase__ = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" ,device_map="""auto""" ) lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""pt""" ) lowerCamelCase__ = model(**_lowerCAmelCase ) lowerCamelCase__ = outputs.logits # model predicts one of the 1000 ImageNet classes lowerCamelCase__ = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] ,"""tabby, tabby cat""" )
9
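The sequence length in the tester above comes from the BiT backbone's output stride of 32 rather than from the ViT patch size. The arithmetic, with the tester's image size:

# ViT-hybrid tokenizes the backbone feature map (1/32 of the input),
# then prepends the [CLS] token.
image_size = 64                        # the model tester's default above
num_patches = (image_size // 32) ** 2  # 2 * 2 = 4 spatial positions
seq_length = num_patches + 1           # +1 for [CLS]
print(seq_length)                      # 5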
'''simple docstring''' import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Tuple ): lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) lowerCamelCase__ = emb.weight.data return lin_layer def A__ ( __lowerCAmelCase : Dict ): lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" ) lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""] lowerCamelCase__ = mam_aaa["""model"""] remove_ignore_keys_(__lowerCAmelCase ) lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0] lowerCamelCase__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , ) lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""] lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) lowerCamelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCamelCase : Tuple = parser.parse_args() UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
9
1
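The helper that builds the LM head above ties it to the embedding matrix by copying the weights into a bias-free linear layer. A standalone sketch with illustrative sizes:

import torch
from torch import nn

emb = nn.Embedding(1000, 64)                  # illustrative vocab size and dim
vocab_size, emb_dim = emb.weight.shape
lm_head = nn.Linear(emb_dim, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data         # share the embedding parameters
assert torch.equal(lm_head.weight, emb.weight)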
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.4814_5466, 0.457_8275, 0.4082_1073] ,_lowerCAmelCase=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,_lowerCAmelCase=True ,):
        lowerCamelCase__ = size if size is not None else {"""height""": 2_24, """width""": 2_24}
        lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = num_channels
        lowerCamelCase__ = image_size
        lowerCamelCase__ = min_resolution
        lowerCamelCase__ = max_resolution
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = size
        lowerCamelCase__ = do_center_crop
        lowerCamelCase__ = crop_size
        lowerCamelCase__ = do_normalize
        lowerCamelCase__ = image_mean
        lowerCamelCase__ = image_std
        lowerCamelCase__ = do_convert_rgb

    def UpperCamelCase_ ( self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def UpperCamelCase_ ( self ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            lowerCamelCase__ = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        2_55 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) )
        else:
            lowerCamelCase__ = []
            for i in range(self.batch_size ):
                lowerCamelCase__ , lowerCamelCase__ = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
                image_inputs.append(np.random.randint(2_55 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) )

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]

        if torchify:
            lowerCamelCase__ = [torch.from_numpy(_lowerCAmelCase ) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = ChineseCLIPImageProcessingTester(self ,do_center_crop=_lowerCAmelCase )

    @property
    def UpperCamelCase_ ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_convert_rgb""" ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 2_24, """width""": 2_24} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )

        lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,Image.Image )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,np.ndarray )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,torch.Tensor )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)


@require_torch
@require_vision
class UpperCamelCase__ (a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=_lowerCAmelCase )
        lowerCamelCase__ = 3

    @property
    def UpperCamelCase_ ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""center_crop""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(_lowerCAmelCase ,"""do_convert_rgb""" ) )

    def UpperCamelCase_ ( self ):
        pass

    def UpperCamelCase_ ( self ):
        # Initialize image_processing
        lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase ,Image.Image )

        # Test not batched input
        lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

        # Test batched
        lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
9
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = tempfile.mkdtemp()

        lowerCamelCase__ = BlipImageProcessor()
        lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )

        lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase )

        processor.save_pretrained(self.tmpdirname )

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer

    def UpperCamelCase_ ( self ,**_lowerCAmelCase ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor

    def UpperCamelCase_ ( self ):
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]

        lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]

        return image_inputs

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )

        lowerCamelCase__ = BlipProcessor.from_pretrained(
            self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase )

        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = self.prepare_image_inputs()

        lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" )
        lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = """lower newer"""

        lowerCamelCase__ = processor(text=_lowerCAmelCase )

        lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = """lower newer"""
        lowerCamelCase__ = self.prepare_image_inputs()

        lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )

        self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )

        # test if it raises when no input is passed
        with pytest.raises(_lowerCAmelCase ):
            processor()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase )
        lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase )

        self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.get_image_processor()
        lowerCamelCase__ = self.get_tokenizer()

        lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase )

        lowerCamelCase__ = """lower newer"""
        lowerCamelCase__ = self.prepare_image_inputs()

        lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
9
1
'''simple docstring'''
import cva
import numpy as np


class UpperCamelCase__ :
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        if k in (0.04, 0.06):
            lowerCamelCase__ = k
            lowerCamelCase__ = window_size
        else:
            raise ValueError("""invalid k value""" )

    def __str__( self ):
        return str(self.k )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = cva.imread(_lowerCAmelCase ,0 )
        lowerCamelCase__ , lowerCamelCase__ = img.shape
        lowerCamelCase__ = []
        lowerCamelCase__ = img.copy()
        lowerCamelCase__ = cva.cvtColor(_lowerCAmelCase ,cva.COLOR_GRAY2RGB )
        lowerCamelCase__ , lowerCamelCase__ = np.gradient(_lowerCAmelCase )
        lowerCamelCase__ = dx**2
        lowerCamelCase__ = dy**2
        lowerCamelCase__ = dx * dy
        lowerCamelCase__ = 0.04
        lowerCamelCase__ = self.window_size // 2
        for y in range(_lowerCAmelCase ,h - offset ):
            for x in range(_lowerCAmelCase ,w - offset ):
                lowerCamelCase__ = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                lowerCamelCase__ = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                lowerCamelCase__ = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                lowerCamelCase__ = (wxx * wyy) - (wxy**2)
                lowerCamelCase__ = wxx + wyy
                lowerCamelCase__ = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) ,0 )
                    color_img.itemset((y, x, 1) ,0 )
                    color_img.itemset((y, x, 2) ,2_55 )
        return color_img, corner_list


if __name__ == "__main__":
    UpperCamelCase : Optional[Any] = HarrisCorner(0.04, 3)
    UpperCamelCase , UpperCamelCase : Tuple = edge_detect.detect('path_to_image')

    cva.imwrite('detect.png', color_img)
9
'''simple docstring'''
import json
import os

import torch

from diffusers import UNetaDModel


os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)

os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)


def A__ ( __lowerCAmelCase : Union[str, Any] ):
    if hor == 128:
        lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        lowerCamelCase__ = (32, 128, 256)
        lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")

    elif hor == 32:
        lowerCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        lowerCamelCase__ = (32, 64, 128, 256)
        lowerCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    lowerCamelCase__ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    lowerCamelCase__ = model.state_dict()
    lowerCamelCase__ = {
        """down_block_types""": down_block_types,
        """block_out_channels""": block_out_channels,
        """up_block_types""": up_block_types,
        """layers_per_block""": 1,
        """use_timestep_embedding""": True,
        """out_block_type""": """OutConv1DBlock""",
        """norm_num_groups""": 8,
        """downsample_each_block""": False,
        """in_channels""": 14,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """sample_size""": 6_5536,
        """mid_block_type""": """MidResTemporalBlock1D""",
        """act_fn""": """mish""",
    }
    lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    lowerCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
    hf_value_function.load_state_dict(__lowerCAmelCase )

    torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
    with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
        json.dump(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( ):
    lowerCamelCase__ = {
        """in_channels""": 14,
        """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
        """up_block_types""": (),
        """out_block_type""": """ValueFunction""",
        """mid_block_type""": """ValueFunctionMidBlock1D""",
        """block_out_channels""": (32, 64, 128, 256),
        """layers_per_block""": 1,
        """downsample_each_block""": True,
        """sample_size""": 6_5536,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """use_timestep_embedding""": True,
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """norm_num_groups""": 8,
        """act_fn""": """mish""",
    }

    lowerCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    lowerCamelCase__ = model
    lowerCamelCase__ = UNetaDModel(**__lowerCAmelCase )
    print(F'''length of state dict: {len(state_dict.keys() )}''' )
    print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )

    lowerCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )

    hf_value_function.load_state_dict(__lowerCAmelCase )

    torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
    with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(__lowerCAmelCase , __lowerCAmelCase )


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
9
1
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


UpperCamelCase : Any = logging.getLogger(__name__)


class UpperCamelCase__ (_lowerCamelCase ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        lowerCamelCase__ = label_idx

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        if isinstance(A__ ,A__ ):
            lowerCamelCase__ = mode.value
        lowerCamelCase__ = os.path.join(A__ ,F'''{mode}.txt''' )
        lowerCamelCase__ = 1
        lowerCamelCase__ = []
        with open(A__ ,encoding="""utf-8""" ) as f:
            lowerCamelCase__ = []
            lowerCamelCase__ = []
            for line in f:
                if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F'''{mode}-{guid_index}''' ,words=A__ ,labels=A__ ) )
                        guid_index += 1
                        lowerCamelCase__ = []
                        lowerCamelCase__ = []
                else:
                    lowerCamelCase__ = line.split(""" """ )
                    words.append(splits[0] )
                    if len(A__ ) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""" ,"""""" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""" )
            if words:
                examples.append(InputExample(guid=F'''{mode}-{guid_index}''' ,words=A__ ,labels=A__ ) )
        return examples

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
                writer.write(A__ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                lowerCamelCase__ = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
                writer.write(A__ )
            else:
                logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" ,line.split()[0] )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if path:
            with open(A__ ,"""r""" ) as f:
                lowerCamelCase__ = f.read().splitlines()
            if "O" not in labels:
                lowerCamelCase__ = ["""O"""] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class UpperCamelCase__ (_lowerCamelCase ):
    '''simple docstring'''

    def __init__( self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if path:
            with open(A__ ,"""r""" ) as f:
                lowerCamelCase__ = f.read().splitlines()
            if "O" not in labels:
                lowerCamelCase__ = ["""O"""] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class UpperCamelCase__ (_lowerCamelCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        if isinstance(A__ ,A__ ):
            lowerCamelCase__ = mode.value
        lowerCamelCase__ = os.path.join(A__ ,F'''{mode}.txt''' )
        lowerCamelCase__ = 1
        lowerCamelCase__ = []
        with open(A__ ,encoding="""utf-8""" ) as f:
            for sentence in parse_incr(A__ ):
                lowerCamelCase__ = []
                lowerCamelCase__ = []
                for token in sentence:
                    words.append(token["""form"""] )
                    labels.append(token["""upos"""] )
                assert len(A__ ) == len(A__ )
                if words:
                    examples.append(InputExample(guid=F'''{mode}-{guid_index}''' ,words=A__ ,labels=A__ ) )
                    guid_index += 1
        return examples

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = 0
        for sentence in parse_incr(A__ ):
            lowerCamelCase__ = preds_list[example_id]
            lowerCamelCase__ = """"""
            for token in sentence:
                out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(A__ )
            example_id += 1

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if path:
            with open(A__ ,"""r""" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
700
'''simple docstring'''
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class UpperCamelCase__ (a ):
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase = "▁" ,_lowerCAmelCase = True ,_lowerCAmelCase = "<unk>" ,_lowerCAmelCase = "</s>" ,_lowerCAmelCase = "<pad>" ,):
        lowerCamelCase__ = {
            """pad""": {"""id""": 0, """token""": pad_token},
            """eos""": {"""id""": 1, """token""": eos_token},
            """unk""": {"""id""": 2, """token""": unk_token},
        }

        lowerCamelCase__ = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            lowerCamelCase__ = token_dict["""token"""]

        lowerCamelCase__ = Tokenizer(Unigram() )

        lowerCamelCase__ = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(""" {2,}""" ) ,""" """ ),
                normalizers.Lowercase(),
            ] )
        lowerCamelCase__ = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase ),
                pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ),
                pre_tokenizers.Punctuation(),
            ] )
        lowerCamelCase__ = decoders.Metaspace(replacement=_lowerCAmelCase ,add_prefix_space=_lowerCAmelCase )

        lowerCamelCase__ = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''' ,special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] ,)

        lowerCamelCase__ = {
            """model""": """SentencePieceUnigram""",
            """replacement""": replacement,
            """add_prefix_space""": add_prefix_space,
        }

        super().__init__(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
        lowerCamelCase__ = trainers.UnigramTrainer(
            vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)

        if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
            lowerCamelCase__ = [files]
        self._tokenizer.train(_lowerCAmelCase ,trainer=_lowerCAmelCase )

        self.add_unk_id()

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = 80_00 ,_lowerCAmelCase = True ,):
        lowerCamelCase__ = trainers.UnigramTrainer(
            vocab_size=_lowerCAmelCase ,special_tokens=self.special_tokens_list ,show_progress=_lowerCAmelCase ,)
        self._tokenizer.train_from_iterator(_lowerCAmelCase ,trainer=_lowerCAmelCase )

        self.add_unk_id()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = json.loads(self._tokenizer.to_str() )
        lowerCamelCase__ = self.special_tokens["""unk"""]["""id"""]
        lowerCamelCase__ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
9
0
'''simple docstring'''
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class UpperCamelCase__ (unittest.TestCase ):
    '''simple docstring'''

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = 10

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [1, 2, 3, 4]
        lowerCamelCase__ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(lowerCAmelCase__ ,self.block_size ,0 ) ,lowerCAmelCase__ )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(lowerCAmelCase__ ,self.block_size ,0 ) ,lowerCAmelCase__ )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(lowerCAmelCase__ ,self.block_size ,0 ) ,lowerCAmelCase__ )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."""
        lowerCamelCase__ , lowerCamelCase__ = process_story(lowerCAmelCase__ )
        self.assertEqual(lowerCAmelCase__ ,[] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = """"""
        lowerCamelCase__ , lowerCamelCase__ = process_story(lowerCAmelCase__ )
        self.assertEqual(lowerCAmelCase__ ,[] )
        self.assertEqual(lowerCAmelCase__ ,[] )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = (
            """It was the year of Our Lord one thousand seven hundred and """
            """seventy-five\n\nSpiritual revelations were conceded to England """
            """at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
        )
        lowerCamelCase__ , lowerCamelCase__ = process_story(lowerCAmelCase__ )

        lowerCamelCase__ = [
            """It was the year of Our Lord one thousand seven hundred and seventy-five.""",
            """Spiritual revelations were conceded to England at that favoured period, as at this.""",
        ]
        self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )

        lowerCamelCase__ = ["""It was the best of times."""]
        self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = torch.tensor([1, 2, 3, 4] )
        lowerCamelCase__ = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(lowerCAmelCase__ ,0 ).numpy() ,expected.numpy() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        lowerCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(lowerCAmelCase__ ,23 ).numpy() ,expected.numpy() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        lowerCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(lowerCAmelCase__ ,1 ).numpy() ,expected.numpy() )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = 1_01
        lowerCamelCase__ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
        lowerCamelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )

        lowerCamelCase__ = compute_token_type_ids(lowerCAmelCase__ ,lowerCAmelCase__ )
        np.testing.assert_array_equal(lowerCAmelCase__ ,lowerCAmelCase__ )
701
'''simple docstring'''
from __future__ import annotations

import math


def A__ ( __lowerCAmelCase : int ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def A__ ( __lowerCAmelCase : int ):
    if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )

    lowerCamelCase__ = []
    for num in range(len(__lowerCAmelCase ) ):
        lowerCamelCase__ = 0
        while 2 * i * i <= odd_composites[num]:
            lowerCamelCase__ = odd_composites[num] - 2 * i * i
            if is_prime(__lowerCAmelCase ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(__lowerCAmelCase ) == n:
                return list_nums
    return []


def A__ ( ):
    return compute_nums(1 )[0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
0
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCamelCase__ (snake_case__ ):
    '''simple docstring'''

    _UpperCamelCase = 42
    _UpperCamelCase = None


def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int=0.999 , __lowerCAmelCase : List[str]="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(__lowerCAmelCase : str ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(__lowerCAmelCase : List[str] ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )

    lowerCamelCase__ = []
    for i in range(_SCREAMING_SNAKE_CASE ):
        lowerCamelCase__ = i / num_diffusion_timesteps
        lowerCamelCase__ = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
    return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )


class UpperCamelCase__ (snake_case__ ,snake_case__ ):
    '''simple docstring'''

    @register_to_config
    def __init__( self ,_lowerCAmelCase = 10_00 ,_lowerCAmelCase = "fixed_small_log" ,_lowerCAmelCase = True ,_lowerCAmelCase = 1.0 ,_lowerCAmelCase = "epsilon" ,_lowerCAmelCase = "squaredcos_cap_v2" ,):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )

        lowerCamelCase__ = betas_for_alpha_bar(UpperCAmelCase_ )

        lowerCamelCase__ = 1.0 - self.betas
        lowerCamelCase__ = torch.cumprod(self.alphas ,dim=0 )
        lowerCamelCase__ = torch.tensor(1.0 )

        # standard deviation of the initial noise distribution
        lowerCamelCase__ = 1.0

        # setable values
        lowerCamelCase__ = None
        lowerCamelCase__ = torch.from_numpy(np.arange(0 ,UpperCAmelCase_ )[::-1].copy() )

        lowerCamelCase__ = variance_type

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        return sample

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        lowerCamelCase__ = num_inference_steps
        lowerCamelCase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        lowerCamelCase__ = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
        lowerCamelCase__ = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ):
        if prev_timestep is None:
            lowerCamelCase__ = t - 1

        lowerCamelCase__ = self.alphas_cumprod[t]
        lowerCamelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        lowerCamelCase__ = 1 - alpha_prod_t
        lowerCamelCase__ = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            lowerCamelCase__ = self.betas[t]
        else:
            lowerCamelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        lowerCamelCase__ = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            lowerCamelCase__ = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            lowerCamelCase__ = torch.log(torch.clamp(UpperCAmelCase_ ,min=1E-20 ) )
            lowerCamelCase__ = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            lowerCamelCase__ = variance.log()
            lowerCamelCase__ = beta.log()

            lowerCamelCase__ = (predicted_variance + 1) / 2
            lowerCamelCase__ = frac * max_log + (1 - frac) * min_log

        return variance

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase=None ,_lowerCAmelCase = True ,):
        lowerCamelCase__ = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            lowerCamelCase__ , lowerCamelCase__ = torch.split(UpperCAmelCase_ ,sample.shape[1] ,dim=1 )
        else:
            lowerCamelCase__ = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            lowerCamelCase__ = t - 1

        lowerCamelCase__ = self.alphas_cumprod[t]
        lowerCamelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        lowerCamelCase__ = 1 - alpha_prod_t
        lowerCamelCase__ = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            lowerCamelCase__ = self.betas[t]
            lowerCamelCase__ = self.alphas[t]
        else:
            lowerCamelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
            lowerCamelCase__ = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            lowerCamelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            lowerCamelCase__ = model_output
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
                """ for the UnCLIPScheduler.""" )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            lowerCamelCase__ = torch.clamp(
                UpperCAmelCase_ ,-self.config.clip_sample_range ,self.config.clip_sample_range )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        lowerCamelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        lowerCamelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        lowerCamelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        lowerCamelCase__ = 0
        if t > 0:
            lowerCamelCase__ = randn_tensor(
                model_output.shape ,dtype=model_output.dtype ,generator=UpperCAmelCase_ ,device=model_output.device )

            lowerCamelCase__ = self._get_variance(
                UpperCAmelCase_ ,predicted_variance=UpperCAmelCase_ ,prev_timestep=UpperCAmelCase_ ,)

            if self.variance_type == "fixed_small_log":
                lowerCamelCase__ = variance
            elif self.variance_type == "learned_range":
                lowerCamelCase__ = (0.5 * variance).exp()
            else:
                raise ValueError(
                    F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                    """ for the UnCLIPScheduler.""" )

            lowerCamelCase__ = variance * variance_noise

        lowerCamelCase__ = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase_ ,pred_original_sample=UpperCAmelCase_ )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
        lowerCamelCase__ = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
        lowerCamelCase__ = timesteps.to(original_samples.device )

        lowerCamelCase__ = alphas_cumprod[timesteps] ** 0.5
        lowerCamelCase__ = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            lowerCamelCase__ = sqrt_alpha_prod.unsqueeze(-1 )

        lowerCamelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
        lowerCamelCase__ = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            lowerCamelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )

        lowerCamelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise

        return noisy_samples
702
'''simple docstring'''
def A__ ( ):
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(__lowerCAmelCase , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(F'{solution() = }')
9
0
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


UpperCamelCase : str = logging.getLogger(__name__)

UpperCamelCase : List[str] = "pytorch_model.bin"


@dataclasses.dataclass
class UpperCamelCase__ :
    '''simple docstring'''

    _UpperCamelCase = dataclasses.field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} ,)


@dataclasses.dataclass
class UpperCamelCase__ :
    '''simple docstring'''

    _UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
    _UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'A csv or a json file containing the validation data.'} )
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'The name of the task to train on.'} ,)
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'The list of labels for the task.'} )


@dataclasses.dataclass
class UpperCamelCase__ :
    '''simple docstring'''

    _UpperCamelCase = dataclasses.field(
        metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
    _UpperCamelCase = dataclasses.field(
        default='accuracy' ,metadata={'help': 'The evaluation metric used for the task.'} )
    _UpperCamelCase = dataclasses.field(
        default='no' ,metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]'
        } ,)
    _UpperCamelCase = dataclasses.field(
        default=10 ,metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} ,)
    _UpperCamelCase = dataclasses.field(
        default=0.0 ,metadata={
            'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
        } ,)
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} ,)
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} ,)
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} ,)
    _UpperCamelCase = dataclasses.field(
        default=0.0 ,metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} ,)
    _UpperCamelCase = dataclasses.field(
        default=100 ,metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} ,)
    _UpperCamelCase = dataclasses.field(
        default=_SCREAMING_SNAKE_CASE ,metadata={'help': 'Random seed for initialization.'} ,)


def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
    lowerCamelCase__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )

    if args.do_filter_by_confidence:
        lowerCamelCase__ = dataset.filter(lambda __lowerCAmelCase : example["probability"] > args.confidence_threshold )

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        lowerCamelCase__ = int(eval_result * len(__lowerCAmelCase ) )
        print(__lowerCAmelCase )
        lowerCamelCase__ = dataset.sort("""probability""" , reverse=__lowerCAmelCase )
        lowerCamelCase__ = dataset.select(range(__lowerCAmelCase ) )

    lowerCamelCase__ = dataset.remove_columns(["""label""", """probability"""] )
    lowerCamelCase__ = dataset.rename_column("""prediction""" , """label""" )
    lowerCamelCase__ = dataset.map(lambda __lowerCAmelCase : {"label": idalabel[example["label"]]} )
    lowerCamelCase__ = dataset.shuffle(seed=args.seed )

    lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(__lowerCAmelCase , index=__lowerCAmelCase )
    else:
        dataset.to_json(__lowerCAmelCase )


def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , **__lowerCAmelCase : List[Any] ):
    lowerCamelCase__ = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(accelerator.state )

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    lowerCamelCase__ = STModelArguments(model_name_or_path=__lowerCAmelCase )
    lowerCamelCase__ = STDataArguments(train_file=__lowerCAmelCase , infer_file=__lowerCAmelCase )
    lowerCamelCase__ = STTrainingArguments(output_dir=__lowerCAmelCase )
    lowerCamelCase__ = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(__lowerCAmelCase ).items():
            setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    for key, value in kwargs.items():
        if hasattr(__lowerCAmelCase , __lowerCAmelCase ):
            setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    # Sanity checks
    lowerCamelCase__ = {}
    lowerCamelCase__ = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    lowerCamelCase__ = args.train_file
    lowerCamelCase__ = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        lowerCamelCase__ = args.eval_file

    for key in data_files:
        lowerCamelCase__ = data_files[key].split(""".""" )[-1]
        assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            lowerCamelCase__ = extension
        else:
            assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''

    assert (
        args.eval_metric in datasets.list_metrics()
    ), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed )

    logger.info("""Creating the initial data directory for self-training...""" )
    lowerCamelCase__ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
    lowerCamelCase__ = data_dir_format(0 )

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
    accelerator.wait_for_everyone()

    lowerCamelCase__ = None
    lowerCamelCase__ = None
    lowerCamelCase__ = 0
    lowerCamelCase__ = False

    # Show the progress bar
    lowerCamelCase__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )

    # Self-train
    for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        lowerCamelCase__ = data_dir_format(__lowerCAmelCase )
        assert os.path.exists(__lowerCAmelCase )

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        lowerCamelCase__ = os.path.join(__lowerCAmelCase , """stage-1""" )
        lowerCamelCase__ = {
            """accelerator""": accelerator,
            """model_name_or_path""": args.model_name_or_path,
            """cache_dir""": args.cache_dir,
            """do_train""": True,
            """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
            """do_eval""": True if args.eval_file is not None else False,
            """eval_file""": data_files["""eval"""],
            """do_predict""": True,
            """infer_file""": data_files["""infer"""],
            """task_name""": args.task_name,
            """label_list""": args.label_list,
            """output_dir""": current_output_dir,
            """eval_metric""": args.eval_metric,
            """evaluation_strategy""": args.evaluation_strategy,
            """early_stopping_patience""": args.early_stopping_patience,
            """early_stopping_threshold""": args.early_stopping_threshold,
            """seed""": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(__lowerCAmelCase , __lowerCAmelCase ):
                arguments_dict.update({key: value} )

        lowerCamelCase__ = os.path.join(__lowerCAmelCase , """best-checkpoint""" , __lowerCAmelCase )
        if os.path.exists(__lowerCAmelCase ):
            logger.info(
                """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , __lowerCAmelCase , __lowerCAmelCase , )
        else:
            logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , __lowerCAmelCase )
            finetune(**__lowerCAmelCase )
            accelerator.wait_for_everyone()
            assert os.path.exists(__lowerCAmelCase )
            logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , __lowerCAmelCase )

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , """best-checkpoint""" )
            lowerCamelCase__ = os.path.join(__lowerCAmelCase , """stage-2""" )
            # Update arguments_dict
            lowerCamelCase__ = model_path
            lowerCamelCase__ = data_files["""train"""]
            lowerCamelCase__ = current_output_dir

            lowerCamelCase__ = os.path.join(__lowerCAmelCase , """best-checkpoint""" , __lowerCAmelCase )
            if os.path.exists(__lowerCAmelCase ):
                logger.info(
                    """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , __lowerCAmelCase , __lowerCAmelCase , )
            else:
                logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , __lowerCAmelCase )
                finetune(**__lowerCAmelCase )
                accelerator.wait_for_everyone()
                assert os.path.exists(__lowerCAmelCase )
                logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , __lowerCAmelCase )

        lowerCamelCase__ = iteration
        lowerCamelCase__ = data_dir_format(iteration + 1 )

        lowerCamelCase__ = AutoConfig.from_pretrained(os.path.join(__lowerCAmelCase , """best-checkpoint""" ) )
        lowerCamelCase__ = config.idalabel
        lowerCamelCase__ = os.path.join(__lowerCAmelCase , """eval_results_best-checkpoint.json""" )
        lowerCamelCase__ = os.path.join(__lowerCAmelCase , """test_results_best-checkpoint.json""" )
        assert os.path.exists(__lowerCAmelCase )

        with open(__lowerCAmelCase , """r""" ) as f:
            lowerCamelCase__ = float(json.load(__lowerCAmelCase )[args.eval_metric] )
        lowerCamelCase__ = os.path.join(__lowerCAmelCase , """infer_output_best-checkpoint.csv""" )
        assert os.path.exists(__lowerCAmelCase )

        # Loading the dataset from local csv or json files.
        lowerCamelCase__ = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
        lowerCamelCase__ = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]

        if accelerator.is_main_process:
            os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
            shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'''eval_results_iter-{iteration}.json''' ) )
            if os.path.exists(__lowerCAmelCase ):
                shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'''test_results_iter-{iteration}.json''' ) )
            create_pseudo_labeled_data(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        accelerator.wait_for_everyone()

        lowerCamelCase__ = os.path.join(__lowerCAmelCase , F'''train_pseudo.{args.data_file_extension}''' )

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            lowerCamelCase__ = eval_result

            if best_iteration is None:
                lowerCamelCase__ = new_iteration
                lowerCamelCase__ = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    lowerCamelCase__ = new_iteration
                    lowerCamelCase__ = new_eval_result
                    lowerCamelCase__ = 0
                else:
                    if new_eval_result == best_eval_result:
                        lowerCamelCase__ = new_iteration
                        lowerCamelCase__ = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    lowerCamelCase__ = True

        progress_bar.update(1 )

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("""Best iteration: %d""" , __lowerCAmelCase )
        logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __lowerCAmelCase )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(__lowerCAmelCase , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(__lowerCAmelCase , """eval_results_best-iteration.json""" ) , )
    else:
        # Assume that the last iteration is the best
        logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
        logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __lowerCAmelCase )

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(__lowerCAmelCase , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__lowerCAmelCase , """eval_results_best-iteration.json""" ) , )
703
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


UpperCamelCase : int = logging.get_logger(__name__)

UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}

UpperCamelCase : Dict = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

UpperCamelCase : List[Any] = {
    'camembert-base': 5_12,
}

UpperCamelCase : List[str] = '▁'


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = ['input_ids', 'attention_mask']

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token

        lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)

        lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_lowerCAmelCase ) )
        lowerCamelCase__ = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}

        lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
        lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCamelCase__ = [self.cls_token_id]
        lowerCamelCase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )

        if token_ids_a is None:
            return [1] + ([0] * len(_lowerCAmelCase )) + [1]
        return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        lowerCamelCase__ = [self.sep_token_id]
        lowerCamelCase__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def UpperCamelCase_ ( self ):
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = []
        lowerCamelCase__ = """"""
        lowerCamelCase__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_lowerCAmelCase ) + token
                lowerCamelCase__ = True
                lowerCamelCase__ = []
            else:
                current_sub_tokens.append(_lowerCAmelCase )
                lowerCamelCase__ = False
        out_string += self.sp_model.decode(_lowerCAmelCase )
        return out_string.strip()

    def __getstate__( self ):
        lowerCamelCase__ = self.__dict__.copy()
        lowerCamelCase__ = None
        return state

    def __setstate__( self ,_lowerCAmelCase ):
        lowerCamelCase__ = d

        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            lowerCamelCase__ = {}

        lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        if not os.path.isdir(_lowerCAmelCase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase__ = os.path.join(
            _lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,_lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_lowerCAmelCase ,"""wb""" ) as fi:
                lowerCamelCase__ = self.sp_model.serialized_model_proto()
                fi.write(_lowerCAmelCase )

        return (out_vocab_file,)
9
0
'''simple docstring'''
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class UpperCamelCase__ :
    '''simple docstring'''

    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=4 ,_lowerCAmelCase=None ,):
        lowerCamelCase__ = parent
        lowerCamelCase__ = batch_size
        lowerCamelCase__ = seq_length
        lowerCamelCase__ = is_training
        lowerCamelCase__ = use_input_mask
        lowerCamelCase__ = use_token_type_ids
        lowerCamelCase__ = use_labels
        lowerCamelCase__ = vocab_size
        lowerCamelCase__ = hidden_size
        lowerCamelCase__ = num_hidden_layers
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = intermediate_size
        lowerCamelCase__ = hidden_act
        lowerCamelCase__ = hidden_dropout_prob
        lowerCamelCase__ = attention_probs_dropout_prob
        lowerCamelCase__ = max_position_embeddings
        lowerCamelCase__ = type_vocab_size
        lowerCamelCase__ = type_sequence_label_size
        lowerCamelCase__ = initializer_range
        lowerCamelCase__ = num_labels
        lowerCamelCase__ = num_choices
        lowerCamelCase__ = scope

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )

        lowerCamelCase__ = None
        if self.use_input_mask:
            lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )

        lowerCamelCase__ = None
        if self.use_token_type_ids:
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )

        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        if self.use_labels:
            lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_choices )

        lowerCamelCase__ = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCamelCase_ ( self ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=UpperCamelCase_ ,initializer_range=self.initializer_range ,use_stable_embedding=UpperCamelCase_ ,)

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = OpenLlamaModel(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCamelCase__ = model(UpperCamelCase_ ,attention_mask=UpperCamelCase_ )
        lowerCamelCase__ = model(UpperCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
        lowerCamelCase__ = True
        lowerCamelCase__ = OpenLlamaModel(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCamelCase__ = model(
            UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,encoder_hidden_states=UpperCamelCase_ ,encoder_attention_mask=UpperCamelCase_ ,)
        lowerCamelCase__ = model(
            UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,encoder_hidden_states=UpperCamelCase_ ,)
        lowerCamelCase__ = model(UpperCamelCase_ ,attention_mask=UpperCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
        lowerCamelCase__ = OpenLlamaForCausalLM(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCamelCase__ = model(UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
        lowerCamelCase__ = True
        lowerCamelCase__ = True
        lowerCamelCase__ = OpenLlamaForCausalLM(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()

        # first forward pass
        lowerCamelCase__ = model(
            UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,encoder_hidden_states=UpperCamelCase_ ,encoder_attention_mask=UpperCamelCase_ ,use_cache=UpperCamelCase_ ,)
        lowerCamelCase__ = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        lowerCamelCase__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )

        # append to next input_ids and
        lowerCamelCase__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
        lowerCamelCase__ = torch.cat([input_mask, next_mask] ,dim=-1 )

        lowerCamelCase__ = model(
            UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,encoder_hidden_states=UpperCamelCase_ ,encoder_attention_mask=UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ ,)['hidden_states'][0]
        lowerCamelCase__ = model(
            UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,encoder_hidden_states=UpperCamelCase_ ,encoder_attention_mask=UpperCamelCase_ ,past_key_values=UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ ,)['hidden_states'][0]

        # select random slice
        lowerCamelCase__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase__ = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1E-3 ) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.prepare_config_and_inputs()
        (
            lowerCamelCase__
        ) = config_and_inputs
        lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class UpperCamelCase__ (a ,a ,a ,unittest.TestCase ):
    '''simple docstring'''

    _UpperCamelCase = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    _UpperCamelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    _UpperCamelCase = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCamelCase = False
    _UpperCamelCase = False

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = OpenLlamaModelTester(self )
        lowerCamelCase__ = ConfigTester(self ,config_class=UpperCamelCase_ ,hidden_size=37 )

    def UpperCamelCase_ ( self ):
        self.config_tester.run_common_tests()

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = 3
        lowerCamelCase__ = input_dict['input_ids']
        lowerCamelCase__ = input_ids.ne(1 ).to(UpperCamelCase_ )
        lowerCamelCase__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        lowerCamelCase__ = OpenLlamaForSequenceClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCamelCase__ = model(UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,labels=UpperCamelCase_ )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = 3
        lowerCamelCase__ = 'single_label_classification'
        lowerCamelCase__ = input_dict['input_ids']
        lowerCamelCase__ = input_ids.ne(1 ).to(UpperCamelCase_ )
        lowerCamelCase__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        lowerCamelCase__ = OpenLlamaForSequenceClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCamelCase__ = model(UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,labels=UpperCamelCase_ )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    def UpperCamelCase_ ( self ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = 3
        lowerCamelCase__ = 'multi_label_classification'
        lowerCamelCase__ = input_dict['input_ids']
        lowerCamelCase__ = input_ids.ne(1 ).to(UpperCamelCase_ )
        lowerCamelCase__ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCamelCase__ = OpenLlamaForSequenceClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCamelCase__ = model(UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,labels=UpperCamelCase_ )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def UpperCamelCase_ ( self ):
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase__ = ids_tensor([1, 10] ,config.vocab_size )
        lowerCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase__ = OpenLlamaModel(UpperCamelCase_ )
        original_model.to(UpperCamelCase_ )
        original_model.eval()
        lowerCamelCase__ = original_model(UpperCamelCase_ ).last_hidden_state
        lowerCamelCase__ = original_model(UpperCamelCase_ ).last_hidden_state

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase__ = {'type': scaling_type, 'factor': 10.0}
        lowerCamelCase__ = OpenLlamaModel(UpperCamelCase_ )
        scaled_model.to(UpperCamelCase_ )
        scaled_model.eval()
        lowerCamelCase__ = scaled_model(UpperCamelCase_ ).last_hidden_state
        lowerCamelCase__ = scaled_model(UpperCamelCase_ ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1E-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1E-5 ) )
704
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = R"""\w+[.]\d+""" lowerCamelCase__ = re.findall(__lowerCAmelCase , __lowerCAmelCase ) for pat in pats: lowerCamelCase__ = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) ) return key def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=42 ): # Step 1: Convert pytorch tensor to numpy lowerCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) ) lowerCamelCase__ = flatten_dict(__lowerCAmelCase ) lowerCamelCase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ = rename_key(__lowerCAmelCase ) lowerCamelCase__ = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCamelCase__ = jnp.asarray(__lowerCAmelCase ) return unflatten_dict(__lowerCAmelCase )
9
0
'''simple docstring'''
from math import factorial

# Factorials of the digits 0-9, keyed by the digit as a string.
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(number: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(number))


def solution() -> int:
    # 7 * 9! + 1 is an upper bound: a number with more digits can never equal
    # the sum of the factorials of its digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(F'{solution() = }')
705
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] 
,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """patrickvonplaten/t5-tiny-random""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,eager_mode=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = TensorFlowBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
9
0
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCamelCase : int = logging.get_logger(__name__) logging.set_verbosity_info() def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] ): if "xprophetnet" in prophetnet_checkpoint_path: lowerCamelCase__ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase_ ) lowerCamelCase__ = XLMProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase_ , output_loading_info=lowerCamelCase_ ) else: lowerCamelCase__ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase_ ) lowerCamelCase__ = ProphetNetForConditionalGeneration.from_pretrained( lowerCamelCase_ , output_loading_info=lowerCamelCase_ ) lowerCamelCase__ = ['key_proj', 'value_proj', 'query_proj'] lowerCamelCase__ = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: lowerCamelCase__ = key.split(""".""" ) if attributes[0] == "lm_head": lowerCamelCase__ = prophet lowerCamelCase__ = prophet_old else: lowerCamelCase__ = prophet.prophetnet lowerCamelCase__ = prophet_old.model lowerCamelCase__ = False for attribute in attributes: if attribute in mapping: lowerCamelCase__ = mapping[attribute] if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) > 0: lowerCamelCase__ = attribute elif hasattr(lowerCamelCase_ , lowerCamelCase_ ): lowerCamelCase__ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowerCamelCase__ = old_model.weight logger.info(F'''{attribute} is initialized.''' ) lowerCamelCase__ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
lowerCamelCase__ = old_model.bias logger.info(F'''{attribute} is initialized''' ) lowerCamelCase__ = True break elif attribute in special_keys and hasattr(lowerCamelCase_ , """in_proj_weight""" ): lowerCamelCase__ = old_model.in_proj_weight.shape[0] // 3 lowerCamelCase__ = getattr(lowerCamelCase_ , lowerCamelCase_ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowerCamelCase__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowerCamelCase__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowerCamelCase__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowerCamelCase__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowerCamelCase__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowerCamelCase__ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowerCamelCase__ = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." lowerCamelCase__ = nn.Parameter(old_model.embed_positions.weight[:512, :] ) lowerCamelCase__ = True break if attribute.isdigit(): lowerCamelCase__ = model[int(lowerCamelCase_ )] lowerCamelCase__ = old_model[int(lowerCamelCase_ )] else: lowerCamelCase__ = getattr(lowerCamelCase_ , lowerCamelCase_ ) if old_attribute == "": lowerCamelCase__ = old_model else: if not hasattr(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError(F'''{old_model} does not have {old_attribute}''' ) lowerCamelCase__ = getattr(lowerCamelCase_ , lowerCamelCase_ ) if not is_key_init: raise ValueError(F'''{key} was not correctly initialized!''' ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCamelCase : Any = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
706
'''simple docstring'''
from math import factorial

UpperCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
DIGIT_FACTORIAL = UpperCamelCase


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""")
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 100_0000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0"""
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution()}')
9
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
707
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCamelCase : Optional[Any] = 'src/diffusers' # Matches is_xxx_available() UpperCamelCase : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla UpperCamelCase : Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') UpperCamelCase : Optional[int] = '\n{0} = None\n' UpperCamelCase : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' UpperCamelCase : Any = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def A__ ( __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = _re_backend.findall(__lowerCAmelCase ) if len(__lowerCAmelCase ) == 0: return None return "_and_".join(__lowerCAmelCase ) def A__ ( ): with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase__ = 0 lowerCamelCase__ = {} # Go through the end of the file while line_index < len(__lowerCAmelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase__ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 lowerCamelCase__ = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1: lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _re_single_line_import.search(__lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCAmelCase ) > 0: lowerCamelCase__ = objects else: line_index += 1 return backend_specific_objects def A__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCAmelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase ) else: return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Optional[int]=None ): if backend_specific_objects is None: lowerCamelCase__ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase__ = {} for backend, objects in backend_specific_objects.items(): lowerCamelCase__ = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" lowerCamelCase__ = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] ) lowerCamelCase__ = dummy_file return dummy_files def A__ ( __lowerCAmelCase : List[str]=False ): lowerCamelCase__ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase__ = {"""torch""": """pt"""} # 
Locate actual dummy modules and read their content. lowerCamelCase__ = os.path.join(__lowerCAmelCase , """utils""" ) lowerCamelCase__ = { backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase__ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCamelCase : Any = parser.parse_args() check_dummies(args.fix_and_overwrite)
9
0
'''simple docstring'''
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["""RobertaPreLayerNormForMaskedLM"""]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="""pytorch_model.bin"""))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("""roberta."""):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len("""roberta.""") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(""".self.LayerNorm.weight""") or tensor_key.endswith(""".self.LayerNorm.bias"""):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
708
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer _UpperCamelCase = False _UpperCamelCase = True _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """This is a test""" lowerCamelCase__ = """This is a test""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = """<s>""" lowerCamelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) ,_lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<unk>""" ) self.assertEqual(vocab_keys[1] ,"""<s>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(_lowerCAmelCase ) ,20_00 ) def UpperCamelCase_ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,20_00 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCAmelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[4_65, 2_87, 2_65, 6_31, 8_42] ) lowerCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,) # fmt: on lowerCamelCase__ = tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) self.assertListEqual( _lowerCAmelCase ,[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] ,) lowerCamelCase__ = tokenizer.convert_ids_to_tokens(_lowerCAmelCase ) # fmt: off self.assertListEqual( _lowerCAmelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCamelCase_ ( self ): lowerCamelCase__ = GPTSwaTokenizer(_lowerCAmelCase ) lowerCamelCase__ = ["""This is a test""", """I was born in 92000, and this is falsé."""] lowerCamelCase__ = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) # Test that decode_fast returns the input text for 
text, token_ids in zip(_lowerCAmelCase ,_lowerCAmelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCAmelCase ) ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off lowerCamelCase__ = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCAmelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=_lowerCAmelCase ,)
9
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } UpperCamelCase : Optional[int] = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCamelCase__ = "lm_head" lowerCamelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: lowerCamelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: lowerCamelCase__ = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase__ = value elif weight_type == "weight_g": lowerCamelCase__ = value elif weight_type == "weight_v": lowerCamelCase__ = value elif weight_type == "bias": lowerCamelCase__ = value else: lowerCamelCase__ = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ): lowerCamelCase__ = [] lowerCamelCase__ = fairseq_model.state_dict() lowerCamelCase__ = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase__ = False if "conv_layers" in name: load_conv_layer( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , ) lowerCamelCase__ = True else: for key, mapped_key in MAPPING.items(): lowerCamelCase__ = "unispeech." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowerCamelCase__ = True if "*" in mapped_key: lowerCamelCase__ = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] lowerCamelCase__ = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: lowerCamelCase__ = "weight_g" elif "weight_v" in name: lowerCamelCase__ = "weight_v" elif "bias" in name: lowerCamelCase__ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase__ = "weight" else: lowerCamelCase__ = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ): lowerCamelCase__ = full_name.split("""conv_layers.""" )[-1] lowerCamelCase__ = name.split(""".""" ) lowerCamelCase__ = int(items[0] ) lowerCamelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCamelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCamelCase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowerCamelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCamelCase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowerCamelCase__ ) @torch.no_grad() def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : str=True ): if config_path is not None: lowerCamelCase__ = UniSpeechConfig.from_pretrained(lowerCamelCase__ ) else: lowerCamelCase__ = UniSpeechConfig() if is_finetuned: if dict_path: lowerCamelCase__ = Dictionary.load_from_json(lowerCamelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase__ = target_dict.pad_index lowerCamelCase__ = target_dict.bos_index lowerCamelCase__ = target_dict.eos_index lowerCamelCase__ = len(target_dict.symbols ) lowerCamelCase__ = os.path.join(lowerCamelCase__ , """vocab.json""" ) if not os.path.isdir(lowerCamelCase__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) ) return os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) lowerCamelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase__ = 42 lowerCamelCase__ = 43 with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase__ = WavaVecaPhonemeCTCTokenizer( lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , ) lowerCamelCase__ = True if config.feat_extract_norm == "layer" else False lowerCamelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) lowerCamelCase__ = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) lowerCamelCase__ = UniSpeechForCTC(lowerCamelCase__ ) else: lowerCamelCase__ = UniSpeechForPreTraining(lowerCamelCase__ ) if is_finetuned: lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowerCamelCase__ = model[0].eval() recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) hf_unispeech.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') 
parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) UpperCamelCase : Optional[int] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
709
'''simple docstring''' from manim import * class UpperCamelCase__ (a ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 ) lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""CPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(1 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""GPU""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [mem.copy() for i in range(6 )] lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 ) lowerCamelCase__ = Text("""Model""" ,font_size=24 ) lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,) lowerCamelCase__ = MarkupText( F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) lowerCamelCase__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) ) self.add(_lowerCAmelCase ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = [] for i, rect in enumerate(_lowerCAmelCase ): lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 ) cpu_target.move_to(_lowerCAmelCase ) cpu_target.generate_target() lowerCamelCase__ = 0.46 / 4 lowerCamelCase__ = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 ) cpu_targs.append(_lowerCAmelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) ) second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(*_lowerCAmelCase ) self.wait()
9
0
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer UpperCamelCase : Dict = logging.get_logger(__name__) class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'AutoTokenizer' _UpperCamelCase = ['tokenizer'] _UpperCamelCase = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): super().__init__(__A ) lowerCamelCase__ = speaker_embeddings @classmethod def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase="speaker_embeddings_path.json" ,**_lowerCAmelCase ): if speaker_embeddings_dict_path is not None: lowerCamelCase__ = get_file_from_repo( __A ,__A ,subfolder=kwargs.pop("""subfolder""" ,__A ) ,cache_dir=kwargs.pop("""cache_dir""" ,__A ) ,force_download=kwargs.pop("""force_download""" ,__A ) ,proxies=kwargs.pop("""proxies""" ,__A ) ,resume_download=kwargs.pop("""resume_download""" ,__A ) ,local_files_only=kwargs.pop("""local_files_only""" ,__A ) ,use_auth_token=kwargs.pop("""use_auth_token""" ,__A ) ,revision=kwargs.pop("""revision""" ,__A ) ,) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(__A ,__A )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) lowerCamelCase__ = None else: with open(__A ) as speaker_embeddings_json: lowerCamelCase__ = json.load(__A ) else: lowerCamelCase__ = None lowerCamelCase__ = AutoTokenizer.from_pretrained(__A ,**__A ) return cls(tokenizer=__A ,speaker_embeddings=__A ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase="speaker_embeddings_path.json" ,_lowerCAmelCase="speaker_embeddings" ,_lowerCAmelCase = False ,**_lowerCAmelCase ,): if self.speaker_embeddings is not None: os.makedirs(os.path.join(__A ,__A ,"""v2""" ) ,exist_ok=__A ) lowerCamelCase__ = {} lowerCamelCase__ = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": lowerCamelCase__ = self._load_voice_preset(__A ) lowerCamelCase__ = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] ,__A ,F'''{prompt_key}_{key}''' ) ,voice_preset[key] ,allow_pickle=__A ,) lowerCamelCase__ = os.path.join(__A ,F'''{prompt_key}_{key}.npy''' ) lowerCamelCase__ = tmp_dict with open(os.path.join(__A ,__A ) ,"""w""" ) as fp: json.dump(__A ,__A ) super().save_pretrained(__A ,__A ,**__A ) def UpperCamelCase_ ( self ,_lowerCAmelCase = None ,**_lowerCAmelCase ): lowerCamelCase__ = self.speaker_embeddings[voice_preset] lowerCamelCase__ = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) lowerCamelCase__ = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" ,"""/""" ) ,voice_preset_paths[key] ,subfolder=kwargs.pop("""subfolder""" ,__A ) ,cache_dir=kwargs.pop("""cache_dir""" ,__A ) ,force_download=kwargs.pop("""force_download""" ,__A ) ,proxies=kwargs.pop("""proxies""" ,__A ) ,resume_download=kwargs.pop("""resume_download""" ,__A ) ,local_files_only=kwargs.pop("""local_files_only""" ,__A ) ,use_auth_token=kwargs.pop("""use_auth_token""" ,__A ) 
,revision=kwargs.pop("""revision""" ,__A ) ,) if path is None: raise ValueError( F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.''' ) lowerCamelCase__ = np.load(__A ) return voice_preset_dict def UpperCamelCase_ ( self ,_lowerCAmelCase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] ,np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase="pt" ,_lowerCAmelCase=2_56 ,_lowerCAmelCase=False ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,**_lowerCAmelCase ,): if voice_preset is not None and not isinstance(__A ,__A ): if ( isinstance(__A ,__A ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): lowerCamelCase__ = self._load_voice_preset(__A ) else: if isinstance(__A ,__A ) and not voice_preset.endswith(""".npz""" ): lowerCamelCase__ = voice_preset + ".npz" lowerCamelCase__ = np.load(__A ) if voice_preset is not None: self._validate_voice_preset_dict(__A ,**__A ) lowerCamelCase__ = BatchFeature(data=__A ,tensor_type=__A ) lowerCamelCase__ = self.tokenizer( __A ,return_tensors=__A ,padding="""max_length""" ,max_length=__A ,return_attention_mask=__A ,return_token_type_ids=__A ,add_special_tokens=__A ,**__A ,) if voice_preset is not None: lowerCamelCase__ = voice_preset return encoded_text
710
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True  # the chain of 1 ends in 1
CHAINS[57] = False  # the chain of 58 ends in 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 1000_0000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution() = }')
9
0
'''simple docstring'''
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["""col_1""", """col_2"""])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"""col_1""": 1})
        self.assertDictEqual(dset[1], {"""col_1""": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["""col_1"""], Sequence(Value("""int64""")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
711
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : List[str] = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = 'donut-swin' _UpperCamelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,_lowerCAmelCase=2_24 ,_lowerCAmelCase=4 ,_lowerCAmelCase=3 ,_lowerCAmelCase=96 ,_lowerCAmelCase=[2, 2, 6, 2] ,_lowerCAmelCase=[3, 6, 12, 24] ,_lowerCAmelCase=7 ,_lowerCAmelCase=4.0 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=False ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,**_lowerCAmelCase ,): super().__init__(**_lowerCAmelCase ) lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = embed_dim lowerCamelCase__ = depths lowerCamelCase__ = len(_lowerCAmelCase ) lowerCamelCase__ = num_heads lowerCamelCase__ = window_size lowerCamelCase__ = mlp_ratio lowerCamelCase__ = qkv_bias lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = drop_path_rate lowerCamelCase__ = hidden_act lowerCamelCase__ = use_absolute_embeddings lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase__ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
9
0
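The `Dataset.from_list` behaviour exercised by the tests in the code row above can be demonstrated in a few lines, assuming a `datasets` release recent enough to ship `Dataset.from_list`:

from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
ds = Dataset.from_list(records)
print(ds.column_names)  # ['col_1', 'col_2'] -- column order follows the first record
print(ds[0])            # {'col_1': 3, 'col_2': 'a'}

As the missing-column test in the row shows, the first record also fixes the schema: columns it lacks are dropped from later records, and its own columns are None-filled where a later record misses them.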
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class UpperCamelCase__ (_snake_case ): '''simple docstring''' _UpperCamelCase = 'char' _UpperCamelCase = 'bpe' _UpperCamelCase = 'wp' UpperCamelCase : int = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class UpperCamelCase__ (_snake_case ): '''simple docstring''' _UpperCamelCase = ['image_processor', 'char_tokenizer'] _UpperCamelCase = 'ViTImageProcessor' _UpperCamelCase = 'MgpstrTokenizer' def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ): lowerCamelCase__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_lowerCAmelCase ,) lowerCamelCase__ = kwargs.pop("""feature_extractor""" ) lowerCamelCase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) lowerCamelCase__ = tokenizer lowerCamelCase__ = AutoTokenizer.from_pretrained("""gpt2""" ) lowerCamelCase__ = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(_lowerCAmelCase ,_lowerCAmelCase ) def __call__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ): if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: lowerCamelCase__ = self.image_processor(_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ) if text is not None: lowerCamelCase__ = self.char_tokenizer(_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: lowerCamelCase__ = encodings["""input_ids"""] return inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = sequences lowerCamelCase__ = char_preds.size(0 ) lowerCamelCase__ , lowerCamelCase__ = self._decode_helper(_lowerCAmelCase ,"""char""" ) lowerCamelCase__ , lowerCamelCase__ = self._decode_helper(_lowerCAmelCase ,"""bpe""" ) lowerCamelCase__ , lowerCamelCase__ = self._decode_helper(_lowerCAmelCase ,"""wp""" ) lowerCamelCase__ = [] lowerCamelCase__ = [] for i in range(_lowerCAmelCase ): lowerCamelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]] lowerCamelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]] lowerCamelCase__ = scores.index(max(_lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) lowerCamelCase__ = {} lowerCamelCase__ = final_strs lowerCamelCase__ = final_scores lowerCamelCase__ = char_strs lowerCamelCase__ = bpe_strs lowerCamelCase__ = wp_strs return out def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): if format == DecodeType.CHARACTER: lowerCamelCase__ = self.char_decode lowerCamelCase__ = 1 lowerCamelCase__ = """[s]""" elif format == DecodeType.BPE: lowerCamelCase__ = self.bpe_decode lowerCamelCase__ = 2 lowerCamelCase__ = """#""" elif format == DecodeType.WORDPIECE: lowerCamelCase__ = self.wp_decode lowerCamelCase__ = 1_02 lowerCamelCase__ = """[SEP]""" else: raise ValueError(F'''Format {format} is 
not supported.''' ) lowerCamelCase__ , lowerCamelCase__ = [], [] lowerCamelCase__ = pred_logits.size(0 ) lowerCamelCase__ = pred_logits.size(1 ) lowerCamelCase__ , lowerCamelCase__ = pred_logits.topk(1 ,dim=-1 ,largest=_lowerCAmelCase ,sorted=_lowerCAmelCase ) lowerCamelCase__ = preds_index.view(-1 ,_lowerCAmelCase )[:, 1:] lowerCamelCase__ = decoder(_lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ = torch.nn.functional.softmax(_lowerCAmelCase ,dim=2 ).max(dim=2 ) lowerCamelCase__ = preds_max_prob[:, 1:] for index in range(_lowerCAmelCase ): lowerCamelCase__ = preds_str[index].find(_lowerCAmelCase ) lowerCamelCase__ = preds_str[index][:pred_eos] lowerCamelCase__ = preds_index[index].cpu().tolist() lowerCamelCase__ = pred_index.index(_lowerCAmelCase ) if eos_token in pred_index else -1 lowerCamelCase__ = preds_max_prob[index][: pred_eos_index + 1] lowerCamelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_lowerCAmelCase ) conf_scores.append(_lowerCAmelCase ) return dec_strs, conf_scores def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [seq.replace(""" """ ,"""""" ) for seq in self.char_tokenizer.batch_decode(_lowerCAmelCase )] return decode_strs def UpperCamelCase_ ( self ,_lowerCAmelCase ): return self.bpe_tokenizer.batch_decode(_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [seq.replace(""" """ ,"""""" ) for seq in self.wp_tokenizer.batch_decode(_lowerCAmelCase )] return decode_strs
712
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCamelCase : Optional[Any] = ['small', 'medium', 'large'] UpperCamelCase : Dict = 'lm_head.decoder.weight' UpperCamelCase : int = 'lm_head.weight' def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ): lowerCamelCase__ = torch.load(__lowerCAmelCase ) lowerCamelCase__ = d.pop(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCamelCase : Dict = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl') UpperCamelCase : str = F'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
9
0
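The greedy-decode confidence scoring inside `_decode_helper` in the code row above boils down to a product of per-step maximum probabilities; a minimal sketch of just that step (the function name and toy shapes are assumptions):

import torch

def sequence_confidence(logits: torch.Tensor) -> torch.Tensor:
    # logits: (batch, seq_len, vocab). The confidence of a greedy decode is
    # the product of the per-step maximum probabilities.
    probs, _ = torch.softmax(logits, dim=-1).max(dim=-1)  # (batch, seq_len)
    return probs.cumprod(dim=-1)[:, -1]

print(sequence_confidence(torch.randn(2, 5, 10)).shape)  # torch.Size([2])

The processor then keeps, per sample, whichever of the char/BPE/wordpiece heads reports the highest such score.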
'''simple docstring''' def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = [0] * len(lowerCAmelCase__ ) lowerCamelCase__ = [] lowerCamelCase__ = [1] * len(lowerCAmelCase__ ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(lowerCAmelCase__ ) ): if indegree[i] == 0: queue.append(lowerCAmelCase__ ) while queue: lowerCamelCase__ = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: lowerCamelCase__ = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(lowerCAmelCase__ ) print(max(lowerCAmelCase__ ) ) # Adjacency list of Graph UpperCamelCase : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
713
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=30 ,_lowerCAmelCase=2 ,_lowerCAmelCase=3 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=32 ,_lowerCAmelCase=2 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=0.6 ,_lowerCAmelCase=None ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = mask_ratio lowerCamelCase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self ): return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEModel(config=_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self ,_lowerCAmelCase 
,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) # expected sequence length = num_patches lowerCamelCase__ = (self.image_size // self.patch_size) ** 2 lowerCamelCase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTMAEForPreTraining(_lowerCAmelCase ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(_lowerCAmelCase ,training=_lowerCAmelCase ) lowerCamelCase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (a ,a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _UpperCamelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,has_text_modality=_lowerCAmelCase ,hidden_size=37 ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase ,tf.keras.layers.Layer ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = 
model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = outputs_dict[0].numpy() lowerCamelCase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 ) def UpperCamelCase_ ( self ): # make the mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCAmelCase ): lowerCamelCase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCAmelCase ): lowerCamelCase__ = v.numpy() else: lowerCamelCase__ = np.array(_lowerCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = prepare_numpy_arrays(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): # make masks reproducible np.random.seed(2 ) lowerCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.constant(_lowerCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase__ = tf_noise super().check_pt_tf_models(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCAmelCase ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_lowerCAmelCase ,_lowerCAmelCase ),) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCAmelCase ,"""_keras_serializable""" ,_lowerCAmelCase ) } lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase__ = tf.convert_to_tensor(_lowerCAmelCase ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase__ = main_layer_class(_lowerCAmelCase ) lowerCamelCase__ = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase__ = tf.keras.Model(_lowerCAmelCase ,outputs=main_layer(_lowerCAmelCase ) ) lowerCamelCase__ = model(_lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase__ = os.path.join(_lowerCAmelCase ,"""keras_model.h5""" ) model.save(_lowerCAmelCase ) lowerCamelCase__ = tf.keras.models.load_model( _lowerCAmelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCAmelCase ,tf.keras.Model ) lowerCamelCase__ = model(_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @slow def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = outputs.last_hidden_state.numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = outputs.logits.numpy() lowerCamelCase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCAmelCase ,saved_model=_lowerCAmelCase ) lowerCamelCase__ = model_class.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase__ = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase__ = 0 else: lowerCamelCase__ = after_outputs["""logits"""].numpy() lowerCamelCase__ = 0 lowerCamelCase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCAmelCase ,1E-5 ) def UpperCamelCase_ ( self ): # make mask reproducible np.random.seed(2 ) lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase__ = model_class(_lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = model(_lowerCAmelCase ,noise=_lowerCAmelCase ) lowerCamelCase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCAmelCase ) lowerCamelCase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase__ = 
model_class.from_config(model.config ) lowerCamelCase__ = new_model(_lowerCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) lowerCamelCase__ = new_model(_lowerCAmelCase ,noise=_lowerCAmelCase ) self.assert_outputs_same(_lowerCAmelCase ,_lowerCAmelCase ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def UpperCamelCase_ ( self ): pass @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_lowerCAmelCase ) def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase_ ( self ): return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def UpperCamelCase_ ( self ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase__ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=_lowerCAmelCase ,return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase__ = ViTMAEConfig() lowerCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase__ = model(**_lowerCAmelCase ,noise=_lowerCAmelCase ) # verify the logits lowerCamelCase__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape ,_lowerCAmelCase ) lowerCamelCase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,_lowerCAmelCase ,atol=1E-4 )
9
0
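A cleaned-up version of the Kahn-style longest-path pass in the code row above, run on the same example graph (the helper name `longest_path_length` is mine):

from collections import deque

def longest_path_length(graph: dict[int, list[int]]) -> int:
    # Kahn's algorithm: repeatedly remove zero in-degree vertices while
    # relaxing the longest vertex-count distance to each successor.
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in graph}
    queue = deque(v for v, d in indegree.items() if d == 0)
    while queue:
        v = queue.popleft()
        for t in graph[v]:
            indegree[t] -= 1
            dist[t] = max(dist[t], dist[v] + 1)
            if indegree[t] == 0:
                queue.append(t)
    return max(dist.values())

g = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
print(longest_path_length(g))  # 5, e.g. the path 0 -> 2 -> 5 -> 6 -> 7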
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = torch.permute(__A , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__A ): # linear layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ): if "metadata" in layer: lowerCamelCase__ = layer.split("""metadata""" ) lowerCamelCase__ = """""".join(split_layer[0] )[:-1] lowerCamelCase__ = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: lowerCamelCase__ = layer.split("""kvstore""" ) lowerCamelCase__ = """""".join(split_layer[0] )[:-1] lowerCamelCase__ = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: lowerCamelCase__ = layer.split("""/""" ) lowerCamelCase__ = """/""".join(split_layer[:-1] ) lowerCamelCase__ = (split_layer[-1],) if "kvstore/path" in layer: lowerCamelCase__ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: lowerCamelCase__ = """file""" else: lowerCamelCase__ = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ): lowerCamelCase__ = rename_keys(__A ) lowerCamelCase__ = {} for k, v in current_block.items(): lowerCamelCase__ = v lowerCamelCase__ = new_current_block torch.save(__A , __A ) def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str = WEIGHTS_NAME ): lowerCamelCase__ = convert_file_size_to_int(__A ) lowerCamelCase__ = [] lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 os.makedirs(__A , exist_ok=__A ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: lowerCamelCase__ = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] lowerCamelCase__ = flatten_dict(__A , sep="""/""" ) lowerCamelCase__ = {} for layer in checkpoint_info.keys(): lowerCamelCase__ = get_key_and_tensorstore_dict( __A , __A , __A ) if curr_real_layer_name in all_layers: lowerCamelCase__ = content else: lowerCamelCase__ = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file lowerCamelCase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() lowerCamelCase__ = torch.tensor(__A ) lowerCamelCase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts lowerCamelCase__ = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __A ) lowerCamelCase__ = """/""".join(__A ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: lowerCamelCase__ = os.path.join( __A , weights_name.replace(""".bin""" , F'''-{len(__A )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__A , __A ) sharded_state_dicts.append(current_block.keys() ) del current_block lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = raw_weights.to(getattr(__A , __A ) ) current_block_size += weight_size total_size += weight_size # Add the last block lowerCamelCase__ = os.path.join(__A , weights_name.replace(""".bin""" , F'''-{len(__A )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__A , __A ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__A ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index lowerCamelCase__ = {} lowerCamelCase__ = {} for idx, shard in enumerate(__A ): lowerCamelCase__ = weights_name.replace( """.bin""" , F'''-{idx+1:05d}-of-{len(__A ):05d}.bin''' ) # len(sharded_state_dicts):05d} lowerCamelCase__ = os.path.join(__A , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__A , os.path.join(__A , __A ) ) lowerCamelCase__ = shard for key in shard: lowerCamelCase__ = shard_file # Add the metadata lowerCamelCase__ = {"""total_size""": total_size} lowerCamelCase__ = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__A , __A ) , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = json.dumps(__A , indent=2 , sort_keys=__A ) + """\n""" f.write(__A ) return metadata, index if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) UpperCamelCase : str = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def A__ ( ): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer lowerCamelCase__ = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) lowerCamelCase__ = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) lowerCamelCase__ = TaTokenizer.from_pretrained("""t5-small""" ) lowerCamelCase__ = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" lowerCamelCase__ = tokenizer(__A , return_tensors="""pt""" ).input_ids lowerCamelCase__ = model.generate(__A , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
714
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
0
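Stripped of the T5X/tensorstore plumbing, the sharding loop in the code row above is a greedy size-budget split over a state dict; a simplified sketch (no index file, no dtype casting -- both dropped here for brevity):

import torch

def shard_state_dict(state_dict: dict, max_bytes: int) -> list[dict]:
    # Start a new shard whenever adding the next tensor would exceed the budget.
    shards, current, size = [], {}, 0
    for name, tensor in state_dict.items():
        nbytes = tensor.numel() * tensor.element_size()
        if current and size + nbytes > max_bytes:
            shards.append(current)
            current, size = {}, 0
        current[name] = tensor
        size += nbytes
    if current:
        shards.append(current)
    return shards

sd = {f"layer_{i}.weight": torch.zeros(256, 256) for i in range(8)}
print([len(s) for s in shard_state_dict(sd, max_bytes=1_000_000)])  # [3, 3, 2]

Note that a single tensor larger than max_bytes still gets a shard of its own, mirroring the behaviour of the original loop.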
def A__ ( __lowerCAmelCase : int ): lowerCamelCase__ = 1 for i in range(1 , num + 1 ): fact *= i return fact def A__ ( __lowerCAmelCase : Optional[Any] ): lowerCamelCase__ = 0 while number > 0: lowerCamelCase__ = number % 10 sum_of_digits += last_digit lowerCamelCase__ = number // 10 # Removing the last_digit from the given number return sum_of_digits def A__ ( __lowerCAmelCase : str = 100 ): lowerCamelCase__ = factorial(_lowerCamelCase ) lowerCamelCase__ = split_and_add(_lowerCamelCase ) return result if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
715
'''simple docstring''' import numpy # List of input, output pairs UpperCamelCase : List[Any] = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCamelCase : Optional[int] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) UpperCamelCase : int = [2, 4, 1, 5] UpperCamelCase : int = len(train_data) UpperCamelCase : Dict = 0.009 def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : str="train" ): return calculate_hypothesis_value(__lowerCAmelCase , __lowerCAmelCase ) - output( __lowerCAmelCase , __lowerCAmelCase ) def A__ ( __lowerCAmelCase : Any ): lowerCamelCase__ = 0 for i in range(len(__lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any]=m ): lowerCamelCase__ = 0 for i in range(__lowerCAmelCase ): if index == -1: summation_value += _error(__lowerCAmelCase ) else: summation_value += _error(__lowerCAmelCase ) * train_data[i][0][index] return summation_value def A__ ( __lowerCAmelCase : List[Any] ): lowerCamelCase__ = summation_of_cost_derivative(__lowerCAmelCase , __lowerCAmelCase ) / m return cost_derivative_value def A__ ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase__ = 0.00_0002 lowerCamelCase__ = 0 lowerCamelCase__ = 0 while True: j += 1 lowerCamelCase__ = [0, 0, 0, 0] for i in range(0 , len(__lowerCAmelCase ) ): lowerCamelCase__ = get_cost_derivative(i - 1 ) lowerCamelCase__ = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase , rtol=__lowerCAmelCase , ): break lowerCamelCase__ = temp_parameter_vector print(("""Number of iterations:""", j) ) def A__ ( ): for i in range(len(__lowerCAmelCase ) ): print(("""Actual output value:""", output(__lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
9
0
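With the standard library, the factorial digit sum in the code row above collapses to two calls (the function name is illustrative):

import math

def factorial_digit_sum(n: int) -> int:
    # Sum of the decimal digits of n!.
    return sum(int(d) for d in str(math.factorial(n)))

print(factorial_digit_sum(10))   # 3628800 -> 27
print(factorial_digit_sum(100))  # 648, the Project Euler 20 answer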
'''simple docstring''' import datasets UpperCamelCase : Any = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n" UpperCamelCase : Union[str, Any] = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n" UpperCamelCase : str = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n" def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ (datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ), } ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" ,) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return {"accuracy": simple_accuracy(__lowerCamelCase ,__lowerCamelCase )}
716
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase ) lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""] lowerCamelCase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowerCamelCase__ = {} lowerCamelCase__ = """first_stage_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] # extract state_dict for UNetLDM lowerCamelCase__ = {} lowerCamelCase__ = """model.diffusion_model.""" for key in keys: if key.startswith(__lowerCAmelCase ): lowerCamelCase__ = state_dict[key] lowerCamelCase__ = config.model.params.first_stage_config.params lowerCamelCase__ = config.model.params.unet_config.params lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval() vqvae.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval() unet.load_state_dict(__lowerCAmelCase ) lowerCamelCase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , ) lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) pipeline.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) UpperCamelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
9
0
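The XNLI metric in the code row above reduces to plain accuracy; a standalone equivalent of its `simple_accuracy` helper:

import numpy as np

def simple_accuracy(preds, labels) -> float:
    # Fraction of positions where prediction equals the reference label.
    return float((np.asarray(preds) == np.asarray(labels)).mean())

print(simple_accuracy([0, 1, 2, 1], [0, 1, 1, 1]))  # 0.75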
def A__ ( __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 1000 ): lowerCamelCase__ = 1 lowerCamelCase__ = 0 for divide_by_number in range(a_ , digit + 1 ): lowerCamelCase__ = [] lowerCamelCase__ = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(a_ ): lowerCamelCase__ = len(a_ ) lowerCamelCase__ = divide_by_number else: has_been_divided.append(a_ ) lowerCamelCase__ = now_divide * 10 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
717
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
9
0
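The remainder-tracking loop in the code row above finds the longest recurring decimal cycle; a compact rewrite of the same long-division idea (names are mine):

def recurring_cycle_length(denominator: int) -> int:
    # The decimal expansion of 1/d repeats as soon as a long-division
    # remainder recurs; the gap between occurrences is the cycle length.
    seen: dict[int, int] = {}
    remainder, position = 1, 0
    while remainder and remainder not in seen:
        seen[remainder] = position
        remainder = remainder * 10 % denominator
        position += 1
    return position - seen[remainder] if remainder else 0

print(recurring_cycle_length(7))                        # 6 (0.142857...)
print(max(range(2, 1000), key=recurring_cycle_length))  # 983, the Project Euler 26 answer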
'''simple docstring''' UpperCamelCase : List[Any] = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' UpperCamelCase : Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] UpperCamelCase : Union[str, Any] = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
718
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ): lowerCamelCase__ = data # Initialize hash values lowerCamelCase__ = [ 0x6a_09_e6_67, 0xbb_67_ae_85, 0x3c_6e_f3_72, 0xa5_4f_f5_3a, 0x51_0e_52_7f, 0x9b_05_68_8c, 0x1f_83_d9_ab, 0x5b_e0_cd_19, ] # Initialize round constants lowerCamelCase__ = [ 0x42_8a_2f_98, 0x71_37_44_91, 0xb5_c0_fb_cf, 0xe9_b5_db_a5, 0x39_56_c2_5b, 0x59_f1_11_f1, 0x92_3f_82_a4, 0xab_1c_5e_d5, 0xd8_07_aa_98, 0x12_83_5b_01, 0x24_31_85_be, 0x55_0c_7d_c3, 0x72_be_5d_74, 0x80_de_b1_fe, 0x9b_dc_06_a7, 0xc1_9b_f1_74, 0xe4_9b_69_c1, 0xef_be_47_86, 0x0f_c1_9d_c6, 0x24_0c_a1_cc, 0x2d_e9_2c_6f, 0x4a_74_84_aa, 0x5c_b0_a9_dc, 0x76_f9_88_da, 0x98_3e_51_52, 0xa8_31_c6_6d, 0xb0_03_27_c8, 0xbf_59_7f_c7, 0xc6_e0_0b_f3, 0xd5_a7_91_47, 0x06_ca_63_51, 0x14_29_29_67, 0x27_b7_0a_85, 0x2e_1b_21_38, 0x4d_2c_6d_fc, 0x53_38_0d_13, 0x65_0a_73_54, 0x76_6a_0a_bb, 0x81_c2_c9_2e, 0x92_72_2c_85, 0xa2_bf_e8_a1, 0xa8_1a_66_4b, 0xc2_4b_8b_70, 0xc7_6c_51_a3, 0xd1_92_e8_19, 0xd6_99_06_24, 0xf4_0e_35_85, 0x10_6a_a0_70, 0x19_a4_c1_16, 0x1e_37_6c_08, 0x27_48_77_4c, 0x34_b0_bc_b5, 0x39_1c_0c_b3, 0x4e_d8_aa_4a, 0x5b_9c_ca_4f, 0x68_2e_6f_f3, 0x74_8f_82_ee, 0x78_a5_63_6f, 0x84_c8_78_14, 0x8c_c7_02_08, 0x90_be_ff_fa, 0xa4_50_6c_eb, 0xbe_f9_a3_f7, 0xc6_71_78_f2, ] lowerCamelCase__ = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCamelCase_ ( _lowerCAmelCase ): lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64)) lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) ) return data + padding + big_endian_integer def UpperCamelCase_ ( self ): # Convert into blocks of 64 bytes lowerCamelCase__ = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowerCamelCase__ = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowerCamelCase__ = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowerCamelCase__ = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 ) lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g) lowerCamelCase__ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 ) lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c) lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00 lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) lowerCamelCase__ = [a, b, c, d, e, f, g, h] # Modify final values lowerCamelCase__ = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in 
enumerate(self.hashes ) ] lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): import hashlib lowerCamelCase__ = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(_lowerCAmelCase ).hash ,hashlib.shaaaa(_lowerCAmelCase ).hexdigest() ) def A__ ( ): import doctest doctest.testmod() lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowerCamelCase__ = f.read() else: lowerCamelCase__ = bytes(__lowerCAmelCase , """utf-8""" ) print(SHAaaa(__lowerCAmelCase ).hash ) if __name__ == "__main__": main()
9
0
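The style-context row above implements SHA-256 by hand and unit-tests it against hashlib; the stdlib reference it validates against is a one-liner, which is the usual way to cross-check a hand-rolled digest:

import hashlib

data = b"Test String"
print(hashlib.sha256(data).hexdigest())
# A manual implementation such as the row's SHAaaa class can be verified with:
# assert SHAaaa(data).hash == hashlib.sha256(data).hexdigest()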
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCamelCase__ : '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=2 ,_lowerCAmelCase=True ,_lowerCAmelCase=False ,_lowerCAmelCase=10 ,_lowerCAmelCase=3 ,_lowerCAmelCase=32 * 8 ,_lowerCAmelCase=32 * 8 ,_lowerCAmelCase=4 ,_lowerCAmelCase=64 ,): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = is_training lowerCamelCase__ = use_auxiliary_loss lowerCamelCase__ = num_queries lowerCamelCase__ = num_channels lowerCamelCase__ = min_size lowerCamelCase__ = max_size lowerCamelCase__ = num_labels lowerCamelCase__ = hidden_dim lowerCamelCase__ = hidden_dim def UpperCamelCase_ ( self ): lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case_ ) lowerCamelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=snake_case_ ) lowerCamelCase__ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=snake_case_ ) > 0.5 ).float() lowerCamelCase__ = (torch.rand((self.batch_size, self.num_labels) ,device=snake_case_ ) > 0.5).long() lowerCamelCase__ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self ): lowerCamelCase__ = MaskaFormerConfig( hidden_size=self.hidden_dim ,) lowerCamelCase__ = self.num_queries lowerCamelCase__ = self.num_labels lowerCamelCase__ = [1, 1, 1, 1] lowerCamelCase__ = self.num_channels lowerCamelCase__ = 64 lowerCamelCase__ = 1_28 lowerCamelCase__ = self.hidden_dim lowerCamelCase__ = self.hidden_dim lowerCamelCase__ = self.hidden_dim return config def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = output.encoder_hidden_states lowerCamelCase__ = output.pixel_decoder_hidden_states lowerCamelCase__ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) ,config.decoder_layers ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=False ): with torch.no_grad(): lowerCamelCase__ = MaskaFormerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase__ = model(pixel_values=snake_case_ ,pixel_mask=snake_case_ ) lowerCamelCase__ = model(snake_case_ ,output_hidden_states=snake_case_ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape 
,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case_ ,snake_case_ ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): lowerCamelCase__ = MaskaFormerForUniversalSegmentation(config=snake_case_ ) model.to(snake_case_ ) model.eval() def comm_check_on_output(_lowerCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCamelCase__ = model(pixel_values=snake_case_ ,pixel_mask=snake_case_ ) lowerCamelCase__ = model(snake_case_ ) comm_check_on_output(snake_case_ ) lowerCamelCase__ = model( pixel_values=snake_case_ ,pixel_mask=snake_case_ ,mask_labels=snake_case_ ,class_labels=snake_case_ ) comm_check_on_output(snake_case_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class UpperCamelCase__ (_a ,_a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _UpperCamelCase = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase_ ( self ): lowerCamelCase__ = MaskaFormerModelTester(self ) lowerCamelCase__ = ConfigTester(self ,config_class=snake_case_ ,has_text_modality=snake_case_ ) def UpperCamelCase_ ( self ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(snake_case_ ,**snake_case_ ,output_hidden_states=snake_case_ ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case_ ) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""Mask2Former is not a generative model""" ) def UpperCamelCase_ ( self ): pass @unittest.skip(reason="""Mask2Former does not use token embeddings""" ) def UpperCamelCase_ ( self ): pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" ) def UpperCamelCase_ ( self ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase_ ( self ): pass def 
UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(snake_case_ ) lowerCamelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,snake_case_ ) @slow def UpperCamelCase_ ( self ): for model_name in ["facebook/mask2former-swin-small-coco-instance"]: lowerCamelCase__ = MaskaFormerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def UpperCamelCase_ ( self ): lowerCamelCase__ = (self.model_tester.min_size,) * 2 lowerCamelCase__ = { """pixel_values""": torch.randn((2, 3, *size) ,device=snake_case_ ), """mask_labels""": torch.randn((2, 10, *size) ,device=snake_case_ ), """class_labels""": torch.zeros(2 ,10 ,device=snake_case_ ).long(), } lowerCamelCase__ = self.model_tester.get_config() lowerCamelCase__ = MaskaFormerForUniversalSegmentation(snake_case_ ).to(snake_case_ ) lowerCamelCase__ = model(**snake_case_ ) self.assertTrue(outputs.loss is not None ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(snake_case_ ,**snake_case_ ,output_hidden_states=snake_case_ ) def UpperCamelCase_ ( self ): lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(snake_case_ ).to(snake_case_ ) lowerCamelCase__ = model(**snake_case_ ,output_attentions=snake_case_ ) self.assertTrue(outputs.attentions is not None ) def UpperCamelCase_ ( self ): if not self.model_tester.is_training: return lowerCamelCase__ = self.all_model_classes[1] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() lowerCamelCase__ = model_class(snake_case_ ) model.to(snake_case_ ) model.train() lowerCamelCase__ = model(snake_case_ ,mask_labels=snake_case_ ,class_labels=snake_case_ ).loss loss.backward() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.all_model_classes[1] lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = model_class(snake_case_ ).to(snake_case_ ) model.train() lowerCamelCase__ = model(snake_case_ ,mask_labels=snake_case_ ,class_labels=snake_case_ ) lowerCamelCase__ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCamelCase__ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() lowerCamelCase__ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCamelCase__ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) UpperCamelCase : List[str] = 1E-4 def A__ ( ): lowerCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class UpperCamelCase__ (unittest.TestCase ): '''simple 
docstring''' @cached_property def UpperCamelCase_ ( self ): return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self ): return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(snake_case_ ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(snake_case_ ,return_tensors="""pt""" ).to(snake_case_ ) lowerCamelCase__ = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case_ ,(1, 3, 3_84, 3_84) ) with torch.no_grad(): lowerCamelCase__ = model(**snake_case_ ) lowerCamelCase__ = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,snake_case_ ,atol=snake_case_ ) ) lowerCamelCase__ = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,snake_case_ ,atol=snake_case_ ) ) lowerCamelCase__ = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,snake_case_ ,atol=snake_case_ ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case_ ).eval() lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(snake_case_ ,return_tensors="""pt""" ).to(snake_case_ ) lowerCamelCase__ = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case_ ,(1, 3, 3_84, 3_84) ) with torch.no_grad(): lowerCamelCase__ = model(**snake_case_ ) # masks_queries_logits lowerCamelCase__ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) lowerCamelCase__ = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] lowerCamelCase__ = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,snake_case_ ,atol=snake_case_ ) ) # class_queries_logits lowerCamelCase__ = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) lowerCamelCase__ = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,snake_case_ ,atol=snake_case_ ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case_ ).eval() lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] ,segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] 
,return_tensors="""pt""" ,) lowerCamelCase__ = inputs["""pixel_values"""].to(snake_case_ ) lowerCamelCase__ = [el.to(snake_case_ ) for el in inputs["""mask_labels"""]] lowerCamelCase__ = [el.to(snake_case_ ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCamelCase__ = model(**snake_case_ ) self.assertTrue(outputs.loss is not None )
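# Hedged follow-up sketch: the integration tests above stop at raw logits. In
# typical use those logits go through the image processor's post-processing to
# become a segmentation map. The post_process_semantic_segmentation helper and
# its target_sizes argument come from the public Mask2Former API and are an
# assumption here; the checkpoint and image path are the ones the tests use.
import torch
from PIL import Image
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerImageProcessor

image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
processor = MaskaFormerImageProcessor.from_pretrained("""facebook/mask2former-swin-small-coco-instance""")
model = MaskaFormerForUniversalSegmentation.from_pretrained("""facebook/mask2former-swin-small-coco-instance""")
inputs = processor(image, return_tensors="""pt""")
with torch.no_grad():
    outputs = model(**inputs)
# one (height, width) label map per image, resized back to the input resolution
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]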
719
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def A__ ( __lowerCAmelCase : Union[str, Any] ):
    lowerCamelCase__ = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )


def A__ ( __lowerCAmelCase : Tuple ):
    lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape
    lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
    lowerCamelCase__ = emb.weight.data
    return lin_layer


def A__ ( __lowerCAmelCase : Dict ):
    lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
    lowerCamelCase__ = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    lowerCamelCase__ = mam_aaa["""model"""]
    remove_ignore_keys_(__lowerCAmelCase )
    lowerCamelCase__ = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    lowerCamelCase__ = MaMaaaConfig(
        vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    lowerCamelCase__ = state_dict["""decoder.embed_tokens.weight"""]
    lowerCamelCase__ = MaMaaaForConditionalGeneration(__lowerCAmelCase )
    model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
    lowerCamelCase__ = make_linear_from_emb(model.model.shared )
    return model


if __name__ == "__main__":
    UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    UpperCamelCase : Tuple = parser.parse_args()
    UpperCamelCase : List[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
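# Hedged usage sketch: once the conversion above has produced a folder of
# PyTorch weights, it can be reloaded through the standard from_pretrained API.
# The script filename and output path below are illustrative assumptions:
#
#   python convert_fairseq_m2m100_checkpoint.py model.pt ./m2m100-converted
#
from transformers import MaMaaaForConditionalGeneration

converted = MaMaaaForConditionalGeneration.from_pretrained("""./m2m100-converted""")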
9
0
'''simple docstring''' def A__ ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , ): lowerCamelCase__ = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: lowerCamelCase__ = 1 - (matter_density + radiation_density + dark_energy) lowerCamelCase__ = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) lowerCamelCase__ = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation UpperCamelCase : Any = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
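# Hedged sanity check (illustrative values, using the name the demo above
# calls): at redshift z = 0 the density terms plus the derived curvature term
# sum to one by construction, so the function should return hubble_constant
# itself, in whatever units it was given (conventionally km/s/Mpc).
assert abs(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1E-4,
        matter_density=0.3,
        dark_energy=0.7,
        redshift=0,
    )
    - 68.3
) < 1E-9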
720
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): lowerCamelCase__ = tempfile.mkdtemp() lowerCamelCase__ = BlipImageProcessor() lowerCamelCase__ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" ) lowerCamelCase__ = BlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer def UpperCamelCase_ ( self ,**_lowerCAmelCase ): return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor def UpperCamelCase_ ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ ( self ): lowerCamelCase__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )] lowerCamelCase__ = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self ): lowerCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) lowerCamelCase__ = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 ) lowerCamelCase__ = BlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_lowerCAmelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = image_processor(_lowerCAmelCase ,return_tensors="""np""" ) lowerCamelCase__ = processor(images=_lowerCAmelCase ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = processor(text=_lowerCAmelCase ) lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase 
,images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.get_image_processor() lowerCamelCase__ = self.get_tokenizer() lowerCamelCase__ = BlipProcessor(tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ) lowerCamelCase__ = """lower newer""" lowerCamelCase__ = self.prepare_image_inputs() lowerCamelCase__ = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
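# Hedged usage sketch mirroring what the tests above exercise: a BlipProcessor
# bundles the image processor and tokenizer behind a single call. The public
# checkpoint name is an assumption; the tests build the processor from tiny
# random components instead.
import numpy as np
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("""Salesforce/blip-image-captioning-base""")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
batch = processor(images=image, text="""lower newer""", return_tensors="""pt""")
# batch holds exactly the keys the tests assert: pixel_values, input_ids, attention_mask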
9
0