Column summary (from the dataset viewer):

    code                     string (lengths 87 to 55.2k)
    code_codestyle           int64  (0 to 349)
    style_context            string (lengths 135 to 49.1k)
    style_context_codestyle  int64  (0 to 349)
    label                    int64  (0 to 1)

Each row below lists its fields in that order: code, code_codestyle,
style_context, style_context_codestyle, label.
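For programmatic access, a minimal sketch follows, assuming the Hugging Face `datasets` library. The dataset identifier `user/code-style-pairs` is a hypothetical placeholder (this page does not state the actual repo name), and reading `label` as a same-style indicator is an assumption from the column names, not a documented fact.

# Minimal sketch: load and inspect rows with the schema above.
# ASSUMPTION: "user/code-style-pairs" is a hypothetical placeholder id;
# the page does not name the actual dataset repo.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # a Python source snippet (string)
print(row["code_codestyle"])           # style-class id in [0, 349]
print(row["style_context"][:200])      # a second snippet serving as a style reference
print(row["style_context_codestyle"])  # its style-class id
print(row["label"])                    # 0 or 1; presumably whether the two styles match (assumption)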
"""simple docstring""" import colorsys from PIL import Image # type: ignore def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : int ) -> float: '''simple docstring''' __UpperCAmelCase : Optional[int] = x __UpperCAmelCase : List[Any] = y for step in range(UpperCamelCase_ ): # noqa: B007 __UpperCAmelCase : List[str] = a * a - b * b + x __UpperCAmelCase : Any = 2 * a * b + y __UpperCAmelCase : str = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def lowerCamelCase ( _UpperCamelCase : float ) -> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (2_5_5, 2_5_5, 2_5_5) def lowerCamelCase ( _UpperCamelCase : float ) -> tuple: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(UpperCamelCase_ , 1 , 1 ) ) def lowerCamelCase ( _UpperCamelCase : int = 8_0_0 , _UpperCamelCase : int = 6_0_0 , _UpperCamelCase : float = -0.6 , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 3.2 , _UpperCamelCase : int = 5_0 , _UpperCamelCase : bool = True , ) -> Image.Image: '''simple docstring''' __UpperCAmelCase : Dict = Image.new("""RGB""" , (image_width, image_height) ) __UpperCAmelCase : Union[str, Any] = img.load() # loop through the image-coordinates for image_x in range(UpperCamelCase_ ): for image_y in range(UpperCamelCase_ ): # determine the figure-coordinates based on the image-coordinates __UpperCAmelCase : Union[str, Any] = figure_width / image_width * image_height __UpperCAmelCase : str = figure_center_x + (image_x / image_width - 0.5) * figure_width __UpperCAmelCase : int = figure_center_y + (image_y / image_height - 0.5) * figure_height __UpperCAmelCase : Union[str, Any] = get_distance(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: __UpperCAmelCase : Any = get_color_coded_rgb(UpperCamelCase_ ) else: __UpperCAmelCase : Any = get_black_and_white_rgb(UpperCamelCase_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure UpperCAmelCase : Any = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
115
from __future__ import annotations import os from collections.abc import Mapping a_ = tuple[int, int] class lowercase__ : def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> None: '''simple docstring''' lowerCAmelCase__ = vertices lowerCAmelCase__ = { (min(__UpperCAmelCase ), max(__UpperCAmelCase )): weight for edge, weight in edges.items() } def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> None: '''simple docstring''' self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowerCAmelCase__ = weight def UpperCAmelCase ( self )-> Graph: '''simple docstring''' lowerCAmelCase__ = Graph({min(self.vertices )} , {} ) lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 while len(subgraph.vertices ) < len(self.vertices ): lowerCAmelCase__ = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowerCAmelCase__ = edge lowerCAmelCase__ = weight subgraph.add_edge(__UpperCAmelCase , __UpperCAmelCase ) return subgraph def _a ( UpperCamelCase_ : str = "p107_network.txt" ) -> int: """simple docstring""" lowerCAmelCase__ = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = {} lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 with open(UpperCamelCase_ ) as f: lowerCAmelCase__ = f.read().strip().split("\n" ) lowerCAmelCase__ = [line.split("," ) for line in data] for edgea in range(1 , len(UpperCamelCase_ ) ): for edgea in range(UpperCamelCase_ ): if adjaceny_matrix[edgea][edgea] != "-": lowerCAmelCase__ = int(adjaceny_matrix[edgea][edgea] ) lowerCAmelCase__ = Graph(set(range(len(UpperCamelCase_ ) ) ) , UpperCamelCase_ ) lowerCAmelCase__ = graph.prims_algorithm() lowerCAmelCase__ = sum(graph.edges.values() ) lowerCAmelCase__ = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(F"{solution() = }")
340
0
---- row 2 ----
code:

import math

def lowerCamelCase_ ( _UpperCamelCase ) -> bool:
    """simple docstring"""
    return math.sqrt(UpperCamelCase_ ) * math.sqrt(UpperCamelCase_ ) == num

def lowerCamelCase_ ( _UpperCamelCase ) -> bool:
    """simple docstring"""
    snake_case_ : Union[str, Any] = 0
    snake_case_ : List[str] = n
    while left <= right:
        snake_case_ : Dict = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            snake_case_ : Union[str, Any] = mid - 1
        else:
            snake_case_ : Optional[Any] = mid + 1
    return False

if __name__ == "__main__":
    import doctest
    doctest.testmod()

code_codestyle: 279

style_context:

from collections import defaultdict
from math import gcd

def _a ( UpperCamelCase_ : int = 1_500_000 ) -> int:
    """simple docstring"""
    lowerCAmelCase__ = defaultdict(UpperCamelCase_ )
    lowerCAmelCase__ = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , UpperCamelCase_ , 2 ):
            if gcd(UpperCamelCase_ , UpperCamelCase_ ) > 1:
                continue
            lowerCAmelCase__ = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(UpperCamelCase_ , limit + 1 , UpperCamelCase_ ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )

if __name__ == "__main__":
    print(F"{solution() = }")

style_context_codestyle: 340

label: 0
---- row 3 ----
code:

'''simple docstring'''

def UpperCamelCase_ ( A__ : int = 1 , A__ : int = 10_00 ):
    '''simple docstring'''
    lowerCAmelCase_ : List[Any] = 1
    lowerCAmelCase_ : Tuple = 0
    for divide_by_number in range(UpperCamelCase_ , digit + 1 ):
        lowerCAmelCase_ : Dict = []
        lowerCAmelCase_ : List[str] = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(UpperCamelCase_ ):
                    lowerCAmelCase_ : List[Any] = len(UpperCamelCase_ )
                    lowerCAmelCase_ : int = divide_by_number
            else:
                has_been_divided.append(UpperCamelCase_ )
                lowerCAmelCase_ : List[str] = now_divide * 10 % divide_by_number
    return the_digit

# Tests
if __name__ == "__main__":
    import doctest
    doctest.testmod()

code_codestyle: 120

style_context:

import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType

class lowercase__ ( _UpperCAmelCase ):
    a_ =["""image_processor""", """tokenizer"""]
    a_ ="""LayoutLMv2ImageProcessor"""
    a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")

    def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple:
        '''simple docstring'''
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , __UpperCAmelCase , )
            lowerCAmelCase__ = kwargs.pop("feature_extractor" )
        lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(__UpperCAmelCase , __UpperCAmelCase )

    def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                lowerCAmelCase__ = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowerCAmelCase__ = features["words"]
        lowerCAmelCase__ = self.tokenizer(
            text=text if text is not None else features["words"] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features["boxes"] ,
            word_labels=__UpperCAmelCase ,
            add_special_tokens=__UpperCAmelCase ,
            padding=__UpperCAmelCase ,
            truncation=__UpperCAmelCase ,
            max_length=__UpperCAmelCase ,
            stride=__UpperCAmelCase ,
            pad_to_multiple_of=__UpperCAmelCase ,
            return_token_type_ids=__UpperCAmelCase ,
            return_attention_mask=__UpperCAmelCase ,
            return_overflowing_tokens=__UpperCAmelCase ,
            return_special_tokens_mask=__UpperCAmelCase ,
            return_offsets_mapping=__UpperCAmelCase ,
            return_length=__UpperCAmelCase ,
            verbose=__UpperCAmelCase ,
            return_tensors=__UpperCAmelCase ,
            **__UpperCAmelCase , )
        # add pixel values
        lowerCAmelCase__ = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
        lowerCAmelCase__ = images
        return encoded_inputs

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
        '''simple docstring'''
        lowerCAmelCase__ = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" )
        return images_with_overflow

    def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )

    def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
        '''simple docstring'''
        return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )

    @property
    def UpperCAmelCase ( self )-> Optional[int]:
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def UpperCAmelCase ( self )-> Union[str, Any]:
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
        return self.image_processor_class

    @property
    def UpperCAmelCase ( self )-> str:
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
        return self.image_processor

style_context_codestyle: 340

label: 0
---- row 4 ----
code:

def lowerCamelCase__ ( ) -> int:
    return 1

def lowerCamelCase__ ( snake_case_ : int ) -> int:
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()

def lowerCamelCase__ ( snake_case_ : int ) -> int:
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase_ )

def lowerCamelCase__ ( snake_case_ : int ) -> int:
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase_ )

def lowerCamelCase__ ( snake_case_ : int ) -> int:
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase_ )

def lowerCamelCase__ ( snake_case_ : int ) -> int:
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase_ )

def lowerCamelCase__ ( snake_case_ : int ) -> int:
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase_ )

def lowerCamelCase__ ( snake_case_ : int ) -> int:
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase_ )

def lowerCamelCase__ ( snake_case_ : int = 200 ) -> int:
    return two_pound(UpperCamelCase_ )

if __name__ == "__main__":
    print(solution(int(input().strip())))

code_codestyle: 24

style_context:

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor

@require_vision
class lowercase__ ( unittest.TestCase ):
    def UpperCAmelCase ( self )-> Dict:
        '''simple docstring'''
        lowerCAmelCase__ = tempfile.mkdtemp()
        # fmt: off
        lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
        lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        lowerCAmelCase__ = {"unk_token": "<unk>"}
        lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(__UpperCAmelCase ) )
        lowerCAmelCase__ = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__UpperCAmelCase , __UpperCAmelCase )

    def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )

    def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )

    def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
        '''simple docstring'''
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def UpperCAmelCase ( self )-> Any:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def UpperCAmelCase ( self )-> int:
        '''simple docstring'''
        lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCAmelCase ( self )-> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = self.get_rust_tokenizer()
        lowerCAmelCase__ = self.get_image_processor()
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )

    def UpperCAmelCase ( self )-> List[Any]:
        '''simple docstring'''
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
        lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )

    def UpperCAmelCase ( self )-> List[str]:
        '''simple docstring'''
        lowerCAmelCase__ = self.get_image_processor()
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ = self.prepare_image_inputs()
        lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
        lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def UpperCAmelCase ( self )-> Dict:
        '''simple docstring'''
        lowerCAmelCase__ = self.get_image_processor()
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ = "lower newer"
        lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
        lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )

    def UpperCAmelCase ( self )-> int:
        '''simple docstring'''
        lowerCAmelCase__ = self.get_image_processor()
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ = "lower newer"
        lowerCAmelCase__ = self.prepare_image_inputs()
        lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()

    def UpperCAmelCase ( self )-> Any:
        '''simple docstring'''
        lowerCAmelCase__ = "google/owlvit-base-patch32"
        lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
        lowerCAmelCase__ = ["cat", "nasa badge"]
        lowerCAmelCase__ = processor(text=__UpperCAmelCase )
        lowerCAmelCase__ = 16
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()

    def UpperCAmelCase ( self )-> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase__ = "google/owlvit-base-patch32"
        lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
        lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
        lowerCAmelCase__ = processor(text=__UpperCAmelCase )
        lowerCAmelCase__ = 16
        lowerCAmelCase__ = len(__UpperCAmelCase )
        lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()

    def UpperCAmelCase ( self )-> str:
        '''simple docstring'''
        lowerCAmelCase__ = "google/owlvit-base-patch32"
        lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
        lowerCAmelCase__ = ["cat", "nasa badge"]
        lowerCAmelCase__ = processor(text=__UpperCAmelCase )
        lowerCAmelCase__ = 16
        lowerCAmelCase__ = inputs["input_ids"]
        lowerCAmelCase__ = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )

    def UpperCAmelCase ( self )-> List[str]:
        '''simple docstring'''
        lowerCAmelCase__ = self.get_image_processor()
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ = self.prepare_image_inputs()
        lowerCAmelCase__ = self.prepare_image_inputs()
        lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()

    def UpperCAmelCase ( self )-> Tuple:
        '''simple docstring'''
        lowerCAmelCase__ = self.get_image_processor()
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
        lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

style_context_codestyle: 340

label: 0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :Tuple = logging.get_logger(__name__) _lowerCAmelCase :List[str] = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class _UpperCAmelCase ( _UpperCAmelCase ): '''simple docstring''' a__ ='''sew''' def __init__( self , A=3_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A=2 , A="gelu" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.1 , A=0.1 , A=0.02 , A=1E-5 , A="group" , A="gelu" , A=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , A=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A=False , A=1_2_8 , A=1_6 , A=True , A=0.05 , A=1_0 , A=2 , A=0.0 , A=1_0 , A=0 , A="mean" , A=False , A=False , A=2_5_6 , A=0 , A=1 , A=2 , **A , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _UpperCAmelCase : Union[str, Any] = hidden_size _UpperCAmelCase : List[Any] = feat_extract_norm _UpperCAmelCase : Optional[int] = feat_extract_activation _UpperCAmelCase : Dict = list(__UpperCAmelCase ) _UpperCAmelCase : List[Any] = list(__UpperCAmelCase ) _UpperCAmelCase : Dict = list(__UpperCAmelCase ) _UpperCAmelCase : Tuple = conv_bias _UpperCAmelCase : List[Any] = num_conv_pos_embeddings _UpperCAmelCase : Union[str, Any] = num_conv_pos_embedding_groups _UpperCAmelCase : str = len(self.conv_dim ) _UpperCAmelCase : str = num_hidden_layers _UpperCAmelCase : int = intermediate_size _UpperCAmelCase : Any = squeeze_factor _UpperCAmelCase : Dict = hidden_act _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : Any = hidden_dropout _UpperCAmelCase : str = attention_dropout _UpperCAmelCase : List[Any] = activation_dropout _UpperCAmelCase : Optional[Any] = feat_proj_dropout _UpperCAmelCase : Optional[Any] = final_dropout _UpperCAmelCase : Union[str, Any] = layerdrop _UpperCAmelCase : Union[str, Any] = layer_norm_eps _UpperCAmelCase : Optional[int] = initializer_range _UpperCAmelCase : List[Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase : Optional[int] = apply_spec_augment _UpperCAmelCase : int = mask_time_prob _UpperCAmelCase : Dict = mask_time_length _UpperCAmelCase : List[Any] = mask_time_min_masks _UpperCAmelCase : Optional[Any] = mask_feature_prob _UpperCAmelCase : Tuple = mask_feature_length _UpperCAmelCase : List[str] = mask_feature_min_masks # ctc loss _UpperCAmelCase : List[Any] = ctc_loss_reduction _UpperCAmelCase : List[Any] = ctc_zero_infinity # sequence classification _UpperCAmelCase : Dict = use_weighted_layer_sum _UpperCAmelCase : List[str] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> str: return functools.reduce(operator.mul , self.conv_stride , 1 )
263
from __future__ import annotations from cmath import sqrt def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]: """simple docstring""" if a == 0: raise ValueError("Coefficient 'a' must not be zero." ) lowerCAmelCase__ = b * b - 4 * a * c lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a) lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def _a ( ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 ) print(F"The solutions are: {solutiona} and {solutiona}" ) if __name__ == "__main__": main()
340
0
---- row 6 ----
code:

import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor

@require_tokenizers
@require_vision
class _snake_case ( unittest.TestCase ):
    def _lowerCamelCase ( self: List[Any] ) -> Any:
        __UpperCAmelCase : str = tempfile.mkdtemp()
        # fmt: off
        __UpperCAmelCase : Dict = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        __UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __UpperCAmelCase : List[Any] = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        __UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , __UpperCAmelCase )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__UpperCAmelCase , __UpperCAmelCase )

    def _lowerCamelCase ( self: Union[str, Any] , **__lowerCamelCase: List[str] ) -> Tuple:
        return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def _lowerCamelCase ( self: str , **__lowerCamelCase: int ) -> int:
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def _lowerCamelCase ( self: List[str] ) -> List[str]:
        shutil.rmtree(self.tmpdirname )

    def _lowerCamelCase ( self: str ) -> Optional[Any]:
        __UpperCAmelCase : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __UpperCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def _lowerCamelCase ( self: Optional[Any] ) -> str:
        __UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
        __UpperCAmelCase : Optional[Any] = self.get_image_processor()
        __UpperCAmelCase : int = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        __UpperCAmelCase : List[str] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )

    def _lowerCamelCase ( self: str ) -> int:
        __UpperCAmelCase : List[Any] = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __UpperCAmelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        __UpperCAmelCase : Optional[int] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
        __UpperCAmelCase : Any = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )

    def _lowerCamelCase ( self: Any ) -> Union[str, Any]:
        __UpperCAmelCase : Optional[Any] = self.get_image_processor()
        __UpperCAmelCase : str = self.get_tokenizer()
        __UpperCAmelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        __UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
        __UpperCAmelCase : List[str] = image_processor(__UpperCAmelCase , return_tensors="np" )
        __UpperCAmelCase : str = processor(images=__UpperCAmelCase , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
        __UpperCAmelCase : Any = self.get_image_processor()
        __UpperCAmelCase : Tuple = self.get_tokenizer()
        __UpperCAmelCase : int = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        __UpperCAmelCase : Any = "lower newer"
        __UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase )
        __UpperCAmelCase : List[Any] = tokenizer(__UpperCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def _lowerCamelCase ( self: Union[str, Any] ) -> Any:
        __UpperCAmelCase : int = self.get_image_processor()
        __UpperCAmelCase : Optional[int] = self.get_tokenizer()
        __UpperCAmelCase : Any = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        __UpperCAmelCase : Tuple = "lower newer"
        __UpperCAmelCase : Any = self.prepare_image_inputs()
        __UpperCAmelCase : Dict = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with self.assertRaises(__UpperCAmelCase ):
            processor()

    def _lowerCamelCase ( self: Tuple ) -> Any:
        __UpperCAmelCase : str = self.get_image_processor()
        __UpperCAmelCase : Tuple = self.get_tokenizer()
        __UpperCAmelCase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        __UpperCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __UpperCAmelCase : str = processor.batch_decode(__UpperCAmelCase )
        __UpperCAmelCase : Dict = tokenizer.batch_decode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    def _lowerCamelCase ( self: Optional[int] ) -> Optional[int]:
        __UpperCAmelCase : Optional[int] = self.get_image_processor()
        __UpperCAmelCase : Dict = self.get_tokenizer()
        __UpperCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        __UpperCAmelCase : List[Any] = "lower newer"
        __UpperCAmelCase : Any = self.prepare_image_inputs()
        __UpperCAmelCase : Any = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )

code_codestyle: 157

style_context:

import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute

def _a ( UpperCamelCase_ : int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
        raise TypeError("number of qubits must be a integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(UpperCamelCase_ ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )
    lowerCAmelCase__ = QuantumRegister(UpperCamelCase_ , "qr" )
    lowerCAmelCase__ = ClassicalRegister(UpperCamelCase_ , "cr" )
    lowerCAmelCase__ = QuantumCircuit(UpperCamelCase_ , UpperCamelCase_ )
    lowerCAmelCase__ = number_of_qubits
    for i in range(UpperCamelCase_ ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(UpperCamelCase_ ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , UpperCamelCase_ , UpperCamelCase_ )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(UpperCamelCase_ , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(UpperCamelCase_ , UpperCamelCase_ )
    # simulate with 10000 shots
    lowerCAmelCase__ = Aer.get_backend("qasm_simulator" )
    lowerCAmelCase__ = execute(UpperCamelCase_ , UpperCamelCase_ , shots=10_000 )
    return job.result().get_counts(UpperCamelCase_ )

if __name__ == "__main__":
    print(
        F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )

style_context_codestyle: 340

label: 0
---- row 7 ----
code:

def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list , SCREAMING_SNAKE_CASE_: list , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int ) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    A__ = 0
    A__ = 0
    A__ = knapsack(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index + 1 )
    if weights[index] <= max_weight:
        A__ = values[index] + knapsack(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , max_weight - weights[index] , index + 1 )
    return max(UpperCamelCase_ , UpperCamelCase_ )

if __name__ == "__main__":
    import doctest
    doctest.testmod()

code_codestyle: 68

style_context:

import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin

if is_torch_available():
    import torch

class lowercase__ ( _UpperCAmelCase ):
    a_ ="""char"""
    a_ ="""bpe"""
    a_ ="""wp"""

a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)

class lowercase__ ( _UpperCAmelCase ):
    a_ =["""image_processor""", """char_tokenizer"""]
    a_ ="""ViTImageProcessor"""
    a_ ="""MgpstrTokenizer"""

    def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
        '''simple docstring'''
        lowerCAmelCase__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , __UpperCAmelCase , )
            lowerCAmelCase__ = kwargs.pop("feature_extractor" )
        lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        lowerCAmelCase__ = tokenizer
        lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
        lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(__UpperCAmelCase , __UpperCAmelCase )

    def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
        if text is not None:
            lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            lowerCAmelCase__ = encodings["input_ids"]
            return inputs

    def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
        lowerCAmelCase__ = char_preds.size(0 )
        lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
        lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
        lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
        lowerCAmelCase__ = []
        lowerCAmelCase__ = []
        for i in range(__UpperCAmelCase ):
            lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
            lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
            lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        lowerCAmelCase__ = {}
        lowerCAmelCase__ = final_strs
        lowerCAmelCase__ = final_scores
        lowerCAmelCase__ = char_strs
        lowerCAmelCase__ = bpe_strs
        lowerCAmelCase__ = wp_strs
        return out

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            lowerCAmelCase__ = self.char_decode
            lowerCAmelCase__ = 1
            lowerCAmelCase__ = "[s]"
        elif format == DecodeType.BPE:
            lowerCAmelCase__ = self.bpe_decode
            lowerCAmelCase__ = 2
            lowerCAmelCase__ = "#"
        elif format == DecodeType.WORDPIECE:
            lowerCAmelCase__ = self.wp_decode
            lowerCAmelCase__ = 102
            lowerCAmelCase__ = "[SEP]"
        else:
            raise ValueError(F"Format {format} is not supported." )
        lowerCAmelCase__ , lowerCAmelCase__ = [], []
        lowerCAmelCase__ = pred_logits.size(0 )
        lowerCAmelCase__ = pred_logits.size(1 )
        lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
        lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
        lowerCAmelCase__ = decoder(__UpperCAmelCase )
        lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
        lowerCAmelCase__ = preds_max_prob[:, 1:]
        for index in range(__UpperCAmelCase ):
            lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
            lowerCAmelCase__ = preds_str[index][:pred_eos]
            lowerCAmelCase__ = preds_index[index].cpu().tolist()
            lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
            lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
            lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(__UpperCAmelCase )
            conf_scores.append(__UpperCAmelCase )
        return dec_strs, conf_scores

    def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
        '''simple docstring'''
        lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
        return decode_strs

    def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )

    def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
        '''simple docstring'''
        lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
        return decode_strs

style_context_codestyle: 340

label: 0
---- row 8 ----
code:

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()

class _snake_case ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        a :Any = 1
        a :Optional[int] = 3
        a :List[Any] = (32, 32)
        a :Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
        return image

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        torch.manual_seed(0 )
        a :int = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        torch.manual_seed(0 )
        a :List[Any] = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        torch.manual_seed(0 )
        a :int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
        return CLIPTextModel(__UpperCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self ):
        a :List[str] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        a :int = self.dummy_cond_unet_upscale
        a :Dict = DDPMScheduler()
        a :Optional[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
        a :str = self.dummy_vae
        a :Tuple = self.dummy_text_encoder
        a :Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        a :Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        a :Dict = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        a :Dict = StableDiffusionUpscalePipeline(
            unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
        a :Union[str, Any] = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a :int = '''A painting of a squirrel eating a burger'''
        a :int = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        a :Optional[int] = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        a :str = output.images
        a :List[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        a :Union[str, Any] = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__UpperCAmelCase , )[0]
        a :Any = image[0, -3:, -3:, -1]
        a :List[Any] = image_from_tuple[0, -3:, -3:, -1]
        a :List[str] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        a :Union[str, Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def SCREAMING_SNAKE_CASE__ ( self ):
        a :Tuple = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        a :str = self.dummy_cond_unet_upscale
        a :Optional[int] = DDPMScheduler()
        a :Any = DDIMScheduler(prediction_type='''v_prediction''' )
        a :Union[str, Any] = self.dummy_vae
        a :List[Any] = self.dummy_text_encoder
        a :Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        a :List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        a :str = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        a :Tuple = StableDiffusionUpscalePipeline(
            unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
        a :Any = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a :List[str] = '''A painting of a squirrel eating a burger'''
        a :Any = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        a :str = output.images
        assert image.shape[0] == 2
        a :Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
        a :Dict = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
        a :Tuple = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def SCREAMING_SNAKE_CASE__ ( self ):
        a :Any = self.dummy_cond_unet_upscale
        a :str = DDPMScheduler()
        a :str = DDIMScheduler(prediction_type='''v_prediction''' )
        a :Any = self.dummy_vae
        a :Any = self.dummy_text_encoder
        a :Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        a :str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        a :List[str] = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        a :Optional[int] = unet.half()
        a :Optional[int] = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        a :int = StableDiffusionUpscalePipeline(
            unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
        a :List[str] = sd_pipe.to(__UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        a :Optional[int] = '''A painting of a squirrel eating a burger'''
        a :Optional[int] = torch.manual_seed(0 )
        a :str = sd_pipe(
            [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images
        a :Dict = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)

@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE__ ( self ):
        a :Optional[int] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        a :int = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat.npy''' )
        a :List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
        a :Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        a :str = '''a cat sitting on a park bench'''
        a :Tuple = torch.manual_seed(0 )
        a :Union[str, Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
        a :Optional[int] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-3

    def SCREAMING_SNAKE_CASE__ ( self ):
        a :Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        a :Dict = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
            '''/upsampled_cat_fp16.npy''' )
        a :int = '''stabilityai/stable-diffusion-x4-upscaler'''
        a :Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        a :Union[str, Any] = '''a cat sitting on a park bench'''
        a :List[str] = torch.manual_seed(0 )
        a :str = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
        a :int = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def SCREAMING_SNAKE_CASE__ ( self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a :Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-upscale/low_res_cat.png''' )
        a :Union[str, Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
        a :Tuple = StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        a :Dict = '''a cat sitting on a park bench'''
        a :int = torch.manual_seed(0 )
        a :List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type='''np''' , )
        a :Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9

code_codestyle: 94

style_context:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

a_ = {
    '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
    '''tokenization_convbert''': ['''ConvBertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ['''ConvBertTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        '''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ConvBertForMaskedLM''',
        '''ConvBertForMultipleChoice''',
        '''ConvBertForQuestionAnswering''',
        '''ConvBertForSequenceClassification''',
        '''ConvBertForTokenClassification''',
        '''ConvBertLayer''',
        '''ConvBertModel''',
        '''ConvBertPreTrainedModel''',
        '''load_tf_weights_in_convbert''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        '''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFConvBertForMaskedLM''',
        '''TFConvBertForMultipleChoice''',
        '''TFConvBertForQuestionAnswering''',
        '''TFConvBertForSequenceClassification''',
        '''TFConvBertForTokenClassification''',
        '''TFConvBertLayer''',
        '''TFConvBertModel''',
        '''TFConvBertPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)

style_context_codestyle: 340

label: 0
"""simple docstring""" from __future__ import annotations __SCREAMING_SNAKE_CASE : List[Any] = 8.9_88E9 # units = N * m^s * C^-2 def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> dict[str, float]: snake_case_ = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if distance < 0: raise ValueError("""Distance cannot be negative""" ) if force == 0: snake_case_ = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: snake_case_ = abs(UpperCamelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: snake_case_ = abs(UpperCamelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: snake_case_ = (COULOMBS_CONSTANT * charge_product / abs(UpperCamelCase_ )) ** 0.5 return {"distance": distance} raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
347
from collections import defaultdict def _a ( UpperCamelCase_ : int ) -> int: """simple docstring""" lowerCAmelCase__ = 1 lowerCAmelCase__ = True for v in tree[start]: if v not in visited: ret += dfs(UpperCamelCase_ ) if ret % 2 == 0: cuts.append(UpperCamelCase_ ) return ret def _a ( ) -> Optional[Any]: """simple docstring""" dfs(1 ) if __name__ == "__main__": a_, a_ = 10, 9 a_ = defaultdict(list) a_ = {} a_ = [] a_ = 0 a_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
340
0
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' a__ : str = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Union[str, Any]: __UpperCamelCase :Dict = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''') __UpperCamelCase :Tuple = VideoClassificationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase , top_k=2) __UpperCamelCase :Tuple = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[str]: for example in examples: __UpperCamelCase :List[str] = video_classifier(__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ {'''score''': ANY(__UpperCAmelCase), '''label''': ANY(__UpperCAmelCase)}, {'''score''': ANY(__UpperCAmelCase), '''label''': ANY(__UpperCAmelCase)}, ] , ) @require_torch def UpperCamelCase__ ( self) -> Optional[Any]: __UpperCamelCase :Union[str, Any] = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' __UpperCamelCase :List[Any] = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10}) __UpperCamelCase :List[str] = pipeline( '''video-classification''' , model=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , frame_sampling_rate=4) __UpperCamelCase :List[Any] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''') __UpperCamelCase :Union[str, Any] = video_classifier(__UpperCAmelCase , top_k=2) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4) , [{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}] , ) __UpperCamelCase :Optional[int] = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4) , [ [{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}], [{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}], ] , ) @require_tf def UpperCamelCase__ ( self) -> Any: pass
43
import requests
from bsa import BeautifulSoup


def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
    """simple docstring"""
    lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
    lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
340
0
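A hedged sketch of the scraping pattern used by the second snippet in the row above. The Yahoo URL and CSS class are copied from that snippet and are fragile (page layouts change); `bs4` is the usual package name behind the obfuscated `bsa` import. The error handling is an addition for illustration.

# Sketch: fetch a page and read one element's text with BeautifulSoup.
# The selector is whatever the page happened to use at capture time;
# treat it as an assumption, not a stable API.
import requests
from bs4 import BeautifulSoup

def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    if div is None:
        raise RuntimeError("price element not found; the page layout changed")
    return div.find("span").text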
"""simple docstring""" _UpperCAmelCase = { """km/h""": 1.0, """m/s""": 3.6, """mph""": 1.6_0_9_3_4_4, """knot""": 1.8_5_2, } _UpperCAmelCase = { """km/h""": 1.0, """m/s""": 0.2_7_7_7_7_7_7_7_8, """mph""": 0.6_2_1_3_7_1_1_9_2, """knot""": 0.5_3_9_9_5_6_8_0_3, } def __magic_name__ ( lowercase , lowercase , lowercase ): if unit_to not in speed_chart or unit_from not in speed_chart_inverse: SCREAMING_SNAKE_CASE_: List[Any] =( f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n''' f'''Valid values are: {", ".join(UpperCamelCase_ )}''' ) raise ValueError(UpperCamelCase_ ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
173
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

a_ = (3, 9, -11, 0, 7, 5, 1, -1)
a_ = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class lowercase__ :
    a_ = 42
    a_ = 42


class lowercase__ :
    def __init__( self , __UpperCAmelCase )-> None:
        '''simple docstring'''
        lowerCAmelCase__ = None
        for i in sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ):
            lowerCAmelCase__ = Node(__UpperCAmelCase , self.head )

    def __iter__( self )-> Iterator[int]:
        '''simple docstring'''
        lowerCAmelCase__ = self.head
        while node:
            yield node.data
            lowerCAmelCase__ = node.next_node

    def __len__( self )-> int:
        '''simple docstring'''
        return sum(1 for _ in self )

    def __str__( self )-> str:
        '''simple docstring'''
        return " -> ".join([str(__UpperCAmelCase ) for node in self] )


def _a ( UpperCamelCase_ : SortedLinkedList , UpperCamelCase_ : SortedLinkedList ) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(UpperCamelCase_ ) + list(UpperCamelCase_ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    a_ = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
340
0
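Note that the merge in the row above simply concatenates both lists and lets the constructor re-sort them. For comparison, a sketch of the classic O(n + m) two-pointer merge of already-sorted sequences (plain lists here for brevity; function name is illustrative):

# Two-pointer merge of two ascending sequences in O(n + m),
# versus re-sorting the concatenation in O((n + m) log(n + m)).
def merge_sorted(a: list[int], b: list[int]) -> list[int]:
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])
    out.extend(b[j:])
    return out

print(merge_sorted([-11, 0, 1, 3, 5], [-2, 0, 2, 4, 6]))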
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int ) -> str: '''simple docstring''' if isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise TypeError("""'float' object cannot be interpreted as an integer""" ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise TypeError("""'str' object cannot be interpreted as an integer""" ) if num == 0: return "0b0" __UpperCAmelCase : List[str] = False if num < 0: __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : int = -num __UpperCAmelCase : Optional[Any] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(UpperCamelCase_ ) for e in binary ) return "0b" + "".join(str(UpperCamelCase_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
115
import darl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline

a_ = {
    '''n_samples''': 64,
    '''horizon''': 32,
    '''num_inference_steps''': 20,
    '''n_guide_steps''': 2,  # can set to 0 for faster sampling, does not use value network
    '''scale_grad_by_std''': True,
    '''scale''': 0.1,
    '''eta''': 0.0,
    '''t_grad_cutoff''': 2,
    '''device''': '''cpu''',
}

if __name__ == "__main__":
    a_ = '''hopper-medium-v2'''
    a_ = gym.make(env_name)

    a_ = ValueGuidedRLPipeline.from_pretrained(
        '''bglick13/hopper-medium-v2-value-function-hor32''',
        env=env,
    )

    env.seed(0)
    a_ = env.reset()
    a_ = 0
    a_ = 0
    a_ = 1000
    a_ = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            a_ = pipeline(obs, planning_horizon=32)

            # execute action in environment
            a_, a_, a_, a_ = env.step(denorm_actions)
            a_ = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                F" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            a_ = next_observation
    except KeyboardInterrupt:
        pass

    print(F"Total reward: {total_reward}")
340
0
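A clean sketch of the decimal-to-binary conversion implemented by the first snippet in this row. The obfuscated version's two isinstance checks are presumably float/str guards, which is the assumption made explicit here; the function name is illustrative.

# Repeatedly take num % 2 and shift right; emit bits most-significant first.
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = num < 0
    num = abs(num)
    bits = []
    while num > 0:
        bits.append(str(num % 2))
        num >>= 1
    return ("-0b" if negative else "0b") + "".join(reversed(bits))

assert decimal_to_binary(10) == "0b1010"
assert decimal_to_binary(-2) == "-0b10"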
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCAmelCase_ = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" if args.student_type == "roberta": snake_case_ : Optional[int] = False elif args.student_type == "gpt2": snake_case_ : str = False def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> str: """simple docstring""" if args.student_type == "roberta": snake_case_ : Optional[Any] = False def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" snake_case_ : List[Any] = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=UpperCamelCase_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=UpperCamelCase_ , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=UpperCamelCase_ , type=UpperCamelCase_ , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=UpperCamelCase_ , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , 
type=UpperCamelCase_ , required=UpperCamelCase_ , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=UpperCamelCase_ , help='''Temperature for the softmax temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=UpperCamelCase_ , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=UpperCamelCase_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=UpperCamelCase_ , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=UpperCamelCase_ , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=UpperCamelCase_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=UpperCamelCase_ , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=UpperCamelCase_ , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=UpperCamelCase_ , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=UpperCamelCase_ , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=UpperCamelCase_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=UpperCamelCase_ , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=UpperCamelCase_ , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=UpperCamelCase_ , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=UpperCamelCase_ , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=UpperCamelCase_ , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=UpperCamelCase_ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=UpperCamelCase_ , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=UpperCamelCase_ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=UpperCamelCase_ , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=UpperCamelCase_ , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=UpperCamelCase_ , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=UpperCamelCase_ , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=UpperCamelCase_ , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=UpperCamelCase_ , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=UpperCamelCase_ , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=UpperCamelCase_ , default=4_000 , help='''Checkpoint interval.''' ) snake_case_ : Union[str, Any] = parser.parse_args() sanity_checks(UpperCamelCase_ ) # ARGS # init_gpu_params(UpperCamelCase_ ) set_seed(UpperCamelCase_ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ''' itUse `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(UpperCamelCase_ ) , UpperCamelCase_ , indent=4 ) git_log(args.dump_path ) snake_case_ , snake_case_ , snake_case_ : str = MODEL_CLASSES[args.student_type] snake_case_ , snake_case_ , snake_case_ : str = MODEL_CLASSES[args.teacher_type] # TOKENIZER # snake_case_ : List[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) snake_case_ : List[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): snake_case_ : int = tokenizer.all_special_tokens.index(UpperCamelCase_ ) snake_case_ : Any = tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) snake_case_ : str = special_tok_ids snake_case_ : Union[str, Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: snake_case_ : List[Any] = pickle.load(UpperCamelCase_ ) if 
args.mlm: logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , '''rb''' ) as fp: snake_case_ : List[Any] = pickle.load(UpperCamelCase_ ) snake_case_ : Optional[Any] = np.maximum(UpperCamelCase_ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): snake_case_ : Any = 0.0 # do not predict special tokens snake_case_ : Tuple = torch.from_numpy(UpperCamelCase_ ) else: snake_case_ : Optional[Any] = None snake_case_ : Tuple = LmSeqsDataset(params=UpperCamelCase_ , data=UpperCamelCase_ ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) snake_case_ : List[Any] = student_config_class.from_pretrained(args.student_config ) snake_case_ : List[Any] = True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) snake_case_ : Tuple = student_model_class.from_pretrained(args.student_pretrained_weights , config=UpperCamelCase_ ) else: snake_case_ : Optional[int] = student_model_class(UpperCamelCase_ ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # snake_case_ : Union[str, Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=UpperCamelCase_ ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(UpperCamelCase_ , UpperCamelCase_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(UpperCamelCase_ , UpperCamelCase_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() snake_case_ : List[Any] = Distiller( params=UpperCamelCase_ , dataset=UpperCamelCase_ , token_probs=UpperCamelCase_ , student=UpperCamelCase_ , teacher=UpperCamelCase_ ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
279
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py a_ = '''src/transformers''' a_ = '''docs/source/en/tasks''' def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple: """simple docstring""" with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f: lowerCAmelCase__ = f.readlines() # Find the start prompt. lowerCAmelCase__ = 0 while not lines[start_index].startswith(UpperCamelCase_ ): start_index += 1 start_index += 1 lowerCAmelCase__ = start_index while not lines[end_index].startswith(UpperCamelCase_ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. a_ = direct_transformers_import(TRANSFORMERS_PATH) a_ = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
a_ = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() ) lowerCAmelCase__ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n" def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file( filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , ) lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ ) if current_list != new_list: if overwrite: with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`" " to fix this." ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') a_ = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
340
0
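The second snippet in the row above centers on one reusable idea: locating the block of lines between a start prompt and an end prompt in a file so it can be regenerated in place. A minimal sketch of that helper, with illustrative names and no claim to match the library's exact behavior (this version simply raises IndexError if a prompt is missing):

# Return the text between the first line starting with `start_prompt`
# and the next line starting with `end_prompt`, plus its line indices.
def find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    with open(filename, encoding="utf-8") as f:
        lines = f.readlines()
    start = 0
    while not lines[start].startswith(start_prompt):
        start += 1
    start += 1  # first line after the start prompt
    end = start
    while not lines[end].startswith(end_prompt):
        end += 1
    return "".join(lines[start:end]), start, end, lines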
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal


def UpperCamelCase_ ( ):
    '''simple docstring'''
    lowerCAmelCase_ : Optional[int] = 9
    lowerCAmelCase_ : int = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    lowerCAmelCase_ : Tuple = kruskal(UpperCamelCase_ , UpperCamelCase_ )
    lowerCAmelCase_ : Any = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(UpperCamelCase_ ) == sorted(UpperCamelCase_ )
120
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]: """simple docstring""" if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." ) if tokenizer_name is None: lowerCAmelCase__ = TOKENIZER_CLASSES else: lowerCAmelCase__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + "Fast" )} logger.info(F"Loading tokenizer classes: {tokenizer_names}" ) for tokenizer_name in tokenizer_names: lowerCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name] lowerCAmelCase__ = True if checkpoint_name is None: lowerCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() ) else: lowerCAmelCase__ = [checkpoint_name] logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" ) for checkpoint in checkpoint_names: logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" ) # Load tokenizer lowerCAmelCase__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ ) # Save fast tokenizer logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" ) # For organization names we create sub-directories if "/" in checkpoint: lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" ) lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) elif add_prefix: lowerCAmelCase__ = checkpoint lowerCAmelCase__ = dump_path else: lowerCAmelCase__ = None lowerCAmelCase__ = dump_path logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: lowerCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] lowerCAmelCase__ = file_path.split(UpperCamelCase_ )[-1][0] if next_char == "/": lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = None logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" ) lowerCAmelCase__ = tokenizer.save_pretrained( UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ ) logger.info(F"=> File names {file_names}" ) for file_name in file_names: if not file_name.endswith("tokenizer.json" ): os.remove(UpperCamelCase_ ) logger.info(F"=> removing {file_name}" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will " '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) a_ = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
340
0
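The first snippet in the row above tests a `kruskal(num_nodes, edges)` implementation against hand-computed MST edges. A compact union-find sketch of the algorithm under test might look like this (a sketch, not the tested library code):

# Kruskal's MST: sort edges by weight, add an edge whenever its
# endpoints lie in different union-find components.
def kruskal(num_nodes: int, edges: list[list[int]]) -> list[list[int]]:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:
            parent[ru] = rv
            mst.append([u, v, w])
    return mst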
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : Optional[int] = 'char' A_ : Dict = 'bpe' A_ : Dict = 'wp' snake_case_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : Union[str, Any] = ['image_processor', 'char_tokenizer'] A_ : List[Any] = 'ViTImageProcessor' A_ : List[Any] = 'MgpstrTokenizer' def __init__(self : int , a__ : Union[str, Any]=None , a__ : Any=None , **a__ : List[Any] ): """simple docstring""" __snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __UpperCAmelCase , ) __snake_case = kwargs.pop('''feature_extractor''' ) __snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) __snake_case = tokenizer __snake_case = AutoTokenizer.from_pretrained('''gpt2''' ) __snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__(self : Tuple , a__ : Tuple=None , a__ : Any=None , a__ : List[str]=None , **a__ : Dict ): """simple docstring""" if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: __snake_case = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None: __snake_case = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: __snake_case = encodings['''input_ids'''] return inputs def a (self : Any , a__ : Union[str, Any] ): """simple docstring""" __snake_case , __snake_case , __snake_case = sequences __snake_case = char_preds.size(0 ) __snake_case , __snake_case = self._decode_helper(__UpperCAmelCase , '''char''' ) __snake_case , __snake_case = self._decode_helper(__UpperCAmelCase , '''bpe''' ) __snake_case , __snake_case = self._decode_helper(__UpperCAmelCase , '''wp''' ) __snake_case = [] __snake_case = [] for i in range(__UpperCAmelCase ): __snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] __snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] __snake_case = scores.index(max(__UpperCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __snake_case = {} __snake_case = final_strs __snake_case = final_scores __snake_case = char_strs __snake_case = bpe_strs __snake_case = wp_strs return out def a (self : Any , a__ : Dict , a__ : Union[str, Any] ): """simple docstring""" if format == DecodeType.CHARACTER: __snake_case = self.char_decode __snake_case = 1 __snake_case = '''[s]''' elif format == DecodeType.BPE: __snake_case = self.bpe_decode __snake_case = 2 __snake_case = '''#''' elif format == DecodeType.WORDPIECE: __snake_case = self.wp_decode __snake_case = 102 __snake_case = '''[SEP]''' else: raise ValueError(f"""Format {format} is not supported.""" ) __snake_case , __snake_case = [], [] __snake_case = pred_logits.size(0 ) __snake_case = 
pred_logits.size(1 ) __snake_case , __snake_case = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase ) __snake_case = preds_index.view(-1 , __UpperCAmelCase )[:, 1:] __snake_case = decoder(__UpperCAmelCase ) __snake_case , __snake_case = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 ) __snake_case = preds_max_prob[:, 1:] for index in range(__UpperCAmelCase ): __snake_case = preds_str[index].find(__UpperCAmelCase ) __snake_case = preds_str[index][:pred_eos] __snake_case = preds_index[index].cpu().tolist() __snake_case = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1 __snake_case = preds_max_prob[index][: pred_eos_index + 1] __snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__UpperCAmelCase ) conf_scores.append(__UpperCAmelCase ) return dec_strs, conf_scores def a (self : str , a__ : str ): """simple docstring""" __snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs def a (self : List[str] , a__ : Union[str, Any] ): """simple docstring""" return self.bpe_tokenizer.batch_decode(__UpperCAmelCase ) def a (self : List[str] , a__ : Any ): """simple docstring""" __snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs
24
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def _a ( UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : List[str]=1_024 , UpperCamelCase_ : Dict=1_024 , UpperCamelCase_ : List[str]=False , **UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
    """simple docstring"""
    lowerCAmelCase__ = AutoTokenizer.from_pretrained(UpperCamelCase_ )
    lowerCAmelCase__ = SeqaSeqDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , type_path="train" , **UpperCamelCase_ )
    lowerCAmelCase__ = tok.pad_token_id

    def get_lens(UpperCamelCase_ : str ):
        lowerCAmelCase__ = tqdm(
            DataLoader(UpperCamelCase_ , batch_size=512 , num_workers=8 , shuffle=UpperCamelCase_ , collate_fn=ds.collate_fn ) ,
            desc=str(ds.len_file ) ,
        )
        lowerCAmelCase__ = []
        for batch in dl:
            lowerCAmelCase__ = batch["input_ids"].ne(UpperCamelCase_ ).sum(1 ).tolist()
            lowerCAmelCase__ = batch["labels"].ne(UpperCamelCase_ ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(UpperCamelCase_ , UpperCamelCase_ ):
                    max_lens.append(max(UpperCamelCase_ , UpperCamelCase_ ) )
            else:
                max_lens.extend(UpperCamelCase_ )
        return max_lens

    lowerCAmelCase__ = get_lens(UpperCamelCase_ )
    lowerCAmelCase__ = SeqaSeqDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , type_path="val" , **UpperCamelCase_ )
    lowerCAmelCase__ = get_lens(UpperCamelCase_ )
    pickle_save(UpperCamelCase_ , train_ds.len_file )
    pickle_save(UpperCamelCase_ , val_ds.len_file )


if __name__ == "__main__":
    fire.Fire(save_len_file)
340
0
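The dataset-length script in the row above relies on one small trick worth isolating: counting the real (non-pad) tokens in each row of a padded batch. A self-contained PyTorch sketch of just that step:

import torch

# Count non-pad tokens per sequence: `ne(pad_id)` yields a boolean mask,
# and summing over dim=1 counts the True entries row by row.
batch = torch.tensor([[5, 8, 2, 0, 0], [7, 2, 0, 0, 0]])
pad_token_id = 0
lengths = batch.ne(pad_token_id).sum(1).tolist()
print(lengths)  # [3, 2]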
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _UpperCAmelCase : '''simple docstring''' def __lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _UpperCAmelCase : int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _UpperCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _UpperCAmelCase : int = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _UpperCAmelCase : Any = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) _UpperCAmelCase : List[str] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Any: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) _UpperCAmelCase : 
int = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , ) torch.manual_seed(0 ) _UpperCAmelCase : Any = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Any = self.get_dummy_components() _UpperCAmelCase : str = self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _UpperCAmelCase : Dict = self.get_dummy_inputs(__UpperCAmelCase ) _UpperCAmelCase : str = inputs['''prompt'''] _UpperCAmelCase : List[str] = inputs['''generator'''] _UpperCAmelCase : str = inputs['''num_inference_steps'''] _UpperCAmelCase : Optional[Any] = inputs['''output_type'''] if "image" in inputs: _UpperCAmelCase : str = inputs['''image'''] else: _UpperCAmelCase : List[str] = None if "mask_image" in inputs: _UpperCAmelCase : Optional[int] = inputs['''mask_image'''] else: _UpperCAmelCase : Optional[int] = None if "original_image" in inputs: _UpperCAmelCase : int = inputs['''original_image'''] else: _UpperCAmelCase : int = None _UpperCAmelCase , _UpperCAmelCase : Optional[int] = pipe.encode_prompt(__UpperCAmelCase ) # inputs with prompt converted to embeddings _UpperCAmelCase : Optional[Any] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: _UpperCAmelCase : Any = image if mask_image is not None: _UpperCAmelCase : Any = mask_image if original_image is not None: _UpperCAmelCase : Union[str, Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _UpperCAmelCase : List[Any] = pipe(**__UpperCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__UpperCAmelCase ) _UpperCAmelCase : int = self.pipeline_class.from_pretrained(__UpperCAmelCase ) pipe_loaded.to(__UpperCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__UpperCAmelCase , __UpperCAmelCase ) is None , f'`{optional_component}` did not stay set to None after loading.' 
, ) _UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase ) _UpperCAmelCase : Dict = inputs['''generator'''] _UpperCAmelCase : Union[str, Any] = inputs['''num_inference_steps'''] _UpperCAmelCase : Any = inputs['''output_type'''] # inputs with prompt converted to embeddings _UpperCAmelCase : str = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: _UpperCAmelCase : int = image if mask_image is not None: _UpperCAmelCase : Optional[Any] = mask_image if original_image is not None: _UpperCAmelCase : Union[str, Any] = original_image _UpperCAmelCase : int = pipe_loaded(**__UpperCAmelCase )[0] _UpperCAmelCase : Any = np.abs(to_np(__UpperCAmelCase ) - to_np(__UpperCAmelCase ) ).max() self.assertLess(__UpperCAmelCase , 1E-4 ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = self.get_dummy_components() _UpperCAmelCase : List[Any] = self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _UpperCAmelCase : int = self.get_dummy_inputs(__UpperCAmelCase ) _UpperCAmelCase : List[str] = pipe(**__UpperCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__UpperCAmelCase ) _UpperCAmelCase : int = self.pipeline_class.from_pretrained(__UpperCAmelCase ) pipe_loaded.to(__UpperCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests _UpperCAmelCase : List[Any] = self.get_dummy_inputs(__UpperCAmelCase ) _UpperCAmelCase : Union[str, Any] = pipe_loaded(**__UpperCAmelCase )[0] _UpperCAmelCase : Any = np.abs(to_np(__UpperCAmelCase ) - to_np(__UpperCAmelCase ) ).max() self.assertLess(__UpperCAmelCase , 1E-4 )
263
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase__ ( _UpperCAmelCase ): a_ ="""xlnet""" a_ =["""mems"""] a_ ={ """n_token""": """vocab_size""", # Backward compatibility """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __UpperCAmelCase=32000 , __UpperCAmelCase=1024 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=4096 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase="bi" , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=-1 , __UpperCAmelCase=False , __UpperCAmelCase="last" , __UpperCAmelCase=True , __UpperCAmelCase="tanh" , __UpperCAmelCase=0.1 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , )-> int: '''simple docstring''' lowerCAmelCase__ = vocab_size lowerCAmelCase__ = d_model lowerCAmelCase__ = n_layer lowerCAmelCase__ = n_head if d_model % n_head != 0: raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" ) lowerCAmelCase__ = d_model // n_head lowerCAmelCase__ = ff_activation lowerCAmelCase__ = d_inner lowerCAmelCase__ = untie_r lowerCAmelCase__ = attn_type lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = dropout lowerCAmelCase__ = mem_len lowerCAmelCase__ = reuse_len lowerCAmelCase__ = bi_data lowerCAmelCase__ = clamp_len lowerCAmelCase__ = same_length lowerCAmelCase__ = summary_type lowerCAmelCase__ = summary_use_proj lowerCAmelCase__ = summary_activation lowerCAmelCase__ = summary_last_dropout lowerCAmelCase__ = start_n_top lowerCAmelCase__ = end_n_top lowerCAmelCase__ = bos_token_id lowerCAmelCase__ = pad_token_id lowerCAmelCase__ = eos_token_id if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`" " instead." , __UpperCAmelCase , ) lowerCAmelCase__ = kwargs["use_cache"] lowerCAmelCase__ = use_mems_eval lowerCAmelCase__ = use_mems_train super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) @property def UpperCAmelCase ( self )-> Dict: '''simple docstring''' logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." ) return -1 @max_position_embeddings.setter def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]: '''simple docstring''' raise NotImplementedError( F"The model {self.model_type} is one of the few models that has no sequence length limit." )
340
0
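Both pipeline tests in the row above reduce to the same pattern: run the pipeline, save_pretrained, reload, rerun with identical inputs, and assert the outputs agree within tolerance. A framework-agnostic sketch of that final check (the helper name is illustrative):

import numpy as np

# Generic round-trip equivalence check: outputs before and after a
# save/load cycle should match to within a small absolute tolerance.
def assert_roundtrip_close(output_before, output_after, atol: float = 1e-4) -> None:
    diff = np.abs(np.asarray(output_before) - np.asarray(output_after)).max()
    if diff >= atol:
        raise AssertionError(f"max abs difference {diff} exceeds {atol}")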
_snake_case = {}


def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> int:
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    __UpperCAmelCase : Tuple = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    __UpperCAmelCase : Union[str, Any] = _calculate(days - 1, UpperCamelCase_, late + 1 )

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    __UpperCAmelCase : List[str] = _calculate(days - 1, absent + 1, 0 )

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    __UpperCAmelCase : List[str] = _calculate(days - 1, UpperCamelCase_, 0 )

    __UpperCAmelCase : Tuple = state_late + state_absent + state_ontime
    __UpperCAmelCase : Tuple = prizestrings
    return prizestrings


def _UpperCamelCase ( snake_case__ = 30 ) -> int:
    return _calculate(UpperCamelCase_, absent=0, late=0 )


if __name__ == "__main__":
    print(solution())
157
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False ) -> Tuple: """simple docstring""" lowerCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> List[str]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase__ = "" else: lowerCAmelCase__ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" ) lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _a ( UpperCamelCase_ : Dict ) -> Tuple: """simple docstring""" lowerCAmelCase__ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> List[Any]: """simple docstring""" lowerCAmelCase__ = dct.pop(UpperCamelCase_ ) lowerCAmelCase__ = val def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCAmelCase__ = ViTMSNConfig() lowerCAmelCase__ = 1_000 lowerCAmelCase__ = "datasets/huggingface/label-files" lowerCAmelCase__ = "imagenet-1k-id2label.json" lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ ) , "r" ) ) lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCAmelCase__ = 384 lowerCAmelCase__ = 1_536 lowerCAmelCase__ = 6 elif "l16" in checkpoint_url: lowerCAmelCase__ = 1_024 lowerCAmelCase__ = 4_096 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 lowerCAmelCase__ = 0.1 elif "b4" in checkpoint_url: lowerCAmelCase__ = 4 elif "l7" in checkpoint_url: lowerCAmelCase__ = 7 lowerCAmelCase__ = 1_024 lowerCAmelCase__ = 4_096 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 lowerCAmelCase__ = 0.1 lowerCAmelCase__ = ViTMSNModel(UpperCamelCase_ ) lowerCAmelCase__ = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="cpu" )["target_encoder"] lowerCAmelCase__ = ViTImageProcessor(size=config.image_size ) remove_projection_head(UpperCamelCase_ ) lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ , base_model=UpperCamelCase_ ) for src, dest in rename_keys: rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ , base_model=UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) lowerCAmelCase__ = ViTImageProcessor( size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ ) 
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) lowerCAmelCase__ = model(**UpperCamelCase_ ) lowerCAmelCase__ = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase_ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
340
0
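A de-obfuscated sketch of the prize-string recurrence in this row (Project Euler 191): a trinary attendance string earns a prize if it never contains three consecutive absences and has at most one late day. The memoized form below uses lru_cache instead of the snippet's explicit dict cache; names are illustrative.

from functools import lru_cache

# State: remaining days, current run of consecutive absences, lates used.
@lru_cache(maxsize=None)
def prize_strings(days: int, consec_absent: int = 0, lates_used: int = 0) -> int:
    if consec_absent == 3 or lates_used == 2:
        return 0  # rule already broken on this branch
    if days == 0:
        return 1  # survived all days: one valid string
    on_time = prize_strings(days - 1, 0, lates_used)
    absent = prize_strings(days - 1, consec_absent + 1, lates_used)
    late = prize_strings(days - 1, 0, lates_used + 1)
    return on_time + absent + late

print(prize_strings(4))   # 43, the small check from the problem statement
print(prize_strings(30))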
import numpy as np

import datasets


lowerCAmelCase__ = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

lowerCAmelCase__ = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

lowerCAmelCase__ = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {\'mahalanobis\': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    """simple docstring"""

    def UpperCamelCase ( self ) -> Dict:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
                }
            ) ,
        )

    def UpperCamelCase ( self , lowercase , lowercase ) -> int:
        '''simple docstring'''
        A__ = np.array(__UpperCAmelCase )
        A__ = np.array(__UpperCAmelCase )

        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )

        # Get mahalanobis distance for each prediction
        A__ = X - np.mean(__UpperCAmelCase )
        A__ = np.cov(reference_distribution.T )
        try:
            A__ = np.linalg.inv(__UpperCAmelCase )
        except np.linalg.LinAlgError:
            A__ = np.linalg.pinv(__UpperCAmelCase )
        A__ = np.dot(__UpperCAmelCase , __UpperCAmelCase )
        A__ = np.dot(__UpperCAmelCase , X_minus_mu.T ).diagonal()

        return {"mahalanobis": mahal_dist}
68
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a_ = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class lowercase__ ( _UpperCAmelCase ): def __init__( self , **__UpperCAmelCase )-> List[str]: '''simple docstring''' super().__init__(**__UpperCAmelCase ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , __UpperCAmelCase , **__UpperCAmelCase )-> int: '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , **__UpperCAmelCase )-> List[str]: '''simple docstring''' lowerCAmelCase__ = {} if "candidate_labels" in kwargs: lowerCAmelCase__ = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCAmelCase__ = kwargs["hypothesis_template"] return preprocess_params, {}, {} def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This is a photo of {}." )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = load_image(__UpperCAmelCase ) lowerCAmelCase__ = self.image_processor(images=[image] , return_tensors=self.framework ) lowerCAmelCase__ = candidate_labels lowerCAmelCase__ = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels] lowerCAmelCase__ = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework , padding=__UpperCAmelCase ) lowerCAmelCase__ = [text_inputs] return inputs def UpperCAmelCase ( self , __UpperCAmelCase )-> int: '''simple docstring''' lowerCAmelCase__ = model_inputs.pop("candidate_labels" ) lowerCAmelCase__ = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , __UpperCAmelCase ): lowerCAmelCase__ = text_inputs[0] else: # Batching case. lowerCAmelCase__ = text_inputs[0][0] lowerCAmelCase__ = self.model(**__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def UpperCAmelCase ( self , __UpperCAmelCase )-> Tuple: '''simple docstring''' lowerCAmelCase__ = model_outputs.pop("candidate_labels" ) lowerCAmelCase__ = model_outputs["logits"][0] if self.framework == "pt": lowerCAmelCase__ = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCAmelCase__ = probs.tolist() if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ = [scores] elif self.framework == "tf": lowerCAmelCase__ = stable_softmax(__UpperCAmelCase , axis=-1 ) lowerCAmelCase__ = probs.numpy().tolist() else: raise ValueError(F"Unsupported framework: {self.framework}" ) lowerCAmelCase__ = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(__UpperCAmelCase , __UpperCAmelCase ) , key=lambda __UpperCAmelCase : -x[0] ) ] return result
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
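# Editor's sketch (not part of the original solution): a brute-force scan over
# every 13-digit window, usable as a cross-check for the sliding-window
# `solution()` above on an input of this size.
def brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))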
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring""" import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = UniSpeechSatForSequenceClassification.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) snake_case_ = downstream_dict["""projector.weight"""] snake_case_ = downstream_dict["""projector.bias"""] snake_case_ = downstream_dict["""model.post_net.linear.weight"""] snake_case_ = downstream_dict["""model.post_net.linear.bias"""] return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) snake_case_ = downstream_dict["""model.linear.weight"""] snake_case_ = downstream_dict["""model.linear.bias"""] return model def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = UniSpeechSatForXVector.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) snake_case_ = downstream_dict["""connector.weight"""] snake_case_ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): snake_case_ = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] snake_case_ = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] snake_case_ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] snake_case_ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] snake_case_ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] snake_case_ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] snake_case_ = downstream_dict["""objective.W"""] return model @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: snake_case_ = torch.load(UpperCamelCase_ , map_location="""cpu""" ) snake_case_ = checkpoint["""Downstream"""] snake_case_ = UniSpeechSatConfig.from_pretrained(UpperCamelCase_ ) snake_case_ = WavaVecaFeatureExtractor.from_pretrained( UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , do_normalize=UpperCamelCase_ ) snake_case_ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): snake_case_ = convert_classification(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) elif arch.endswith("""ForAudioFrameClassification""" ): snake_case_ = convert_diarization(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) elif arch.endswith("""ForXVector""" ): snake_case_ = convert_xvector(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: snake_case_ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(UpperCamelCase_ ) hf_model.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' 
) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} a_ = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } a_ = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } a_ = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class lowercase__ ( _UpperCAmelCase ): a_ =VOCAB_FILES_NAMES a_ =PRETRAINED_VOCAB_FILES_MAP a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ =PRETRAINED_INIT_CONFIGURATION a_ =["""input_ids""", """attention_mask"""] a_ =DistilBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , )-> List[str]: '''simple docstring''' super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , 
mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars ): lowerCAmelCase__ = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) ) lowerCAmelCase__ = do_lower_case lowerCAmelCase__ = strip_accents lowerCAmelCase__ = tokenize_chinese_chars lowerCAmelCase__ = normalizer_class(**__UpperCAmelCase ) lowerCAmelCase__ = do_lower_case def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None )-> List[str]: '''simple docstring''' lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]: '''simple docstring''' lowerCAmelCase__ = [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]: '''simple docstring''' lowerCAmelCase__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase_ ( _UpperCAmelCase ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Any: super().__init__() self.register_modules( vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , ) def UpperCamelCase__ ( self , __lowercase = "auto") -> List[str]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __UpperCamelCase :List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCAmelCase) def UpperCamelCase__ ( self) -> Union[str, Any]: self.enable_attention_slicing(__UpperCAmelCase) @torch.no_grad() def __call__( self , __lowercase , __lowercase = 512 , __lowercase = 512 , __lowercase = 50 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , __lowercase = None , **__lowercase , ) -> Optional[Any]: if isinstance(__UpperCAmelCase , __UpperCAmelCase): __UpperCamelCase :Optional[Any] = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase): __UpperCamelCase :Tuple = len(__UpperCAmelCase) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase)}""") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__UpperCAmelCase)}.""") # get prompt text embeddings __UpperCamelCase :int = self.tokenizer( __UpperCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) __UpperCamelCase :Union[str, Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __UpperCamelCase :Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""") __UpperCamelCase :Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __UpperCamelCase :str = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = text_embeddings.shape __UpperCamelCase :Optional[Any] 
= text_embeddings.repeat(1 , __UpperCAmelCase , 1) __UpperCamelCase :Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __UpperCamelCase :Union[str, Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __UpperCamelCase :int = 42 if negative_prompt is None: __UpperCamelCase :Tuple = [''''''] elif type(__UpperCAmelCase) is not type(__UpperCAmelCase): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase)} !=""" f""" {type(__UpperCAmelCase)}.""") elif isinstance(__UpperCAmelCase , __UpperCAmelCase): __UpperCamelCase :Optional[Any] = [negative_prompt] elif batch_size != len(__UpperCAmelCase): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase)}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ''' the batch size of `prompt`.''') else: __UpperCamelCase :Optional[int] = negative_prompt __UpperCamelCase :Dict = text_input_ids.shape[-1] __UpperCamelCase :int = self.tokenizer( __UpperCAmelCase , padding='''max_length''' , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' , ) __UpperCamelCase :Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __UpperCamelCase :Tuple = uncond_embeddings.shape[1] __UpperCamelCase :Any = uncond_embeddings.repeat(__UpperCAmelCase , __UpperCAmelCase , 1) __UpperCamelCase :Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __UpperCamelCase :Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__UpperCamelCase :Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __UpperCamelCase :Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __UpperCamelCase :int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __UpperCamelCase :List[Any] = torch.randn( __UpperCAmelCase , generator=__UpperCAmelCase , device='''cpu''' , dtype=__UpperCAmelCase).to(self.device) __UpperCamelCase :Optional[Any] = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device='''cpu''' , dtype=__UpperCAmelCase).to( self.device) else: __UpperCamelCase :List[Any] = torch.randn( __UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase) __UpperCamelCase :Optional[int] = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""") __UpperCamelCase :Dict = latents_reference.to(self.device) __UpperCamelCase :Optional[Any] = latents.to(self.device) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __UpperCamelCase :List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2 __UpperCamelCase :int = (latents_shape[2] - latents_shape_reference[2]) // 2 __UpperCamelCase :Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __UpperCamelCase :Any = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __UpperCamelCase :Optional[int] = 0 if dx < 0 else dx __UpperCamelCase :List[str] = 0 if dy < 0 else dy __UpperCamelCase :Optional[Any] = max(-dx , 0) __UpperCamelCase :int = max(-dy , 0) # import pdb # pdb.set_trace() __UpperCamelCase :Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __UpperCamelCase :List[Any] = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler __UpperCamelCase :Tuple = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __UpperCamelCase :str = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys()) __UpperCamelCase :Dict = {} if accepts_eta: __UpperCamelCase :Tuple = eta for i, t in enumerate(self.progress_bar(__UpperCAmelCase)): # expand the latents if we are doing classifier free guidance __UpperCamelCase :Optional[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents __UpperCamelCase :int = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase) # predict the noise residual __UpperCamelCase :Tuple = self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase).sample # perform guidance if do_classifier_free_guidance: __UpperCamelCase , __UpperCamelCase :List[str] = noise_pred.chunk(2) __UpperCamelCase :Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __UpperCamelCase :Optional[Any] = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) __UpperCamelCase :Optional[Any] = 1 / 0.1_82_15 * latents __UpperCamelCase :Tuple = self.vae.decode(__UpperCAmelCase).sample __UpperCamelCase :Dict = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCamelCase :Tuple = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if self.safety_checker is not None: __UpperCamelCase :Any = self.feature_extractor(self.numpy_to_pil(__UpperCAmelCase) , return_tensors='''pt''').to( self.device) __UpperCamelCase , __UpperCamelCase :List[str] = self.safety_checker( images=__UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)) else: __UpperCamelCase :Any = None if output_type == "pil": __UpperCamelCase :Optional[int] = self.numpy_to_pil(__UpperCAmelCase) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase)
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
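# Editor's sketch (illustrative, not part of the package __init__ above): the
# re-exported names are meant to be consumed as top-level imports, e.g.
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model, optimizer = accelerator.prepare(model, optimizer)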
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger(__name__) def __magic_name__ ( lowercase , lowercase=False ): SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("""head""" ): SCREAMING_SNAKE_CASE_: Dict ="""segformer.encoder.""" + key if key.startswith("""backbone""" ): SCREAMING_SNAKE_CASE_: Optional[int] =key.replace("""backbone""" , """segformer.encoder""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 SCREAMING_SNAKE_CASE_: Tuple =key[key.find("""patch_embed""" ) + len("""patch_embed""" )] SCREAMING_SNAKE_CASE_: Tuple =key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(UpperCamelCase_ )-1}''' ) if "norm" in key: SCREAMING_SNAKE_CASE_: str =key.replace("""norm""" , """layer_norm""" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 SCREAMING_SNAKE_CASE_: Tuple =key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )] SCREAMING_SNAKE_CASE_: int =key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(UpperCamelCase_ )-1}''' ) if "layer_norm1" in key: SCREAMING_SNAKE_CASE_: List[Any] =key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: SCREAMING_SNAKE_CASE_: List[Any] =key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 SCREAMING_SNAKE_CASE_: Tuple =key[key.find("""block""" ) + len("""block""" )] SCREAMING_SNAKE_CASE_: Union[str, Any] =key.replace(f'''block{idx}''' , f'''block.{int(UpperCamelCase_ )-1}''' ) if "attn.q" in key: SCREAMING_SNAKE_CASE_: Any =key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: SCREAMING_SNAKE_CASE_: int =key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: SCREAMING_SNAKE_CASE_: Tuple =key.replace("""attn""" , """attention.self""" ) if "fc1" in key: SCREAMING_SNAKE_CASE_: str =key.replace("""fc1""" , """dense1""" ) if "fc2" in key: SCREAMING_SNAKE_CASE_: Any =key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: SCREAMING_SNAKE_CASE_: Tuple =key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: SCREAMING_SNAKE_CASE_: List[str] =key.replace("""linear_fuse.conv""" , """linear_fuse""" ) SCREAMING_SNAKE_CASE_: Optional[Any] =key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 SCREAMING_SNAKE_CASE_: Union[str, Any] =key[key.find("""linear_c""" ) + len("""linear_c""" )] SCREAMING_SNAKE_CASE_: Union[str, Any] =key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(UpperCamelCase_ )-1}''' ) if key.startswith("""head""" ): SCREAMING_SNAKE_CASE_: List[Any] =key.replace("""head""" , """classifier""" ) SCREAMING_SNAKE_CASE_: List[str] =value return new_state_dict def __magic_name__ ( lowercase , lowercase ): for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) SCREAMING_SNAKE_CASE_: Dict 
=state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) SCREAMING_SNAKE_CASE_: str =state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE_: Optional[Any] =kv_weight[ : config.hidden_sizes[i], : ] SCREAMING_SNAKE_CASE_: Tuple =kv_bias[: config.hidden_sizes[i]] SCREAMING_SNAKE_CASE_: Union[str, Any] =kv_weight[ config.hidden_sizes[i] :, : ] SCREAMING_SNAKE_CASE_: Tuple =kv_bias[ config.hidden_sizes[i] : ] def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: List[Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return image @torch.no_grad() def __magic_name__ ( lowercase , lowercase , lowercase ): SCREAMING_SNAKE_CASE_: List[Any] =SegformerConfig() SCREAMING_SNAKE_CASE_: List[Any] =False # set attributes based on model_name SCREAMING_SNAKE_CASE_: str ="""huggingface/label-files""" if "segformer" in model_name: SCREAMING_SNAKE_CASE_: Tuple =model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2] if "ade" in model_name: SCREAMING_SNAKE_CASE_: Optional[int] =150 SCREAMING_SNAKE_CASE_: Any ="""ade20k-id2label.json""" SCREAMING_SNAKE_CASE_: str =(1, 150, 128, 128) elif "city" in model_name: SCREAMING_SNAKE_CASE_: str =19 SCREAMING_SNAKE_CASE_: Union[str, Any] ="""cityscapes-id2label.json""" SCREAMING_SNAKE_CASE_: Tuple =(1, 19, 128, 128) else: raise ValueError(f'''Model {model_name} not supported''' ) elif "mit" in model_name: SCREAMING_SNAKE_CASE_: Optional[int] =True SCREAMING_SNAKE_CASE_: int =model_name[4:6] SCREAMING_SNAKE_CASE_: int =1000 SCREAMING_SNAKE_CASE_: str ="""imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE_: Union[str, Any] =(1, 1000) else: raise ValueError(f'''Model {model_name} not supported''' ) # set config attributes SCREAMING_SNAKE_CASE_: Optional[int] =json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE_: Optional[Any] ={int(UpperCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE_: List[Any] =idalabel SCREAMING_SNAKE_CASE_: List[str] ={v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": SCREAMING_SNAKE_CASE_: Dict =[64, 128, 320, 512] SCREAMING_SNAKE_CASE_: Tuple =256 elif size == "b2": SCREAMING_SNAKE_CASE_: Dict =[64, 128, 320, 512] SCREAMING_SNAKE_CASE_: Tuple =768 SCREAMING_SNAKE_CASE_: Union[str, Any] =[3, 4, 6, 3] elif size == "b3": SCREAMING_SNAKE_CASE_: Tuple =[64, 128, 320, 512] SCREAMING_SNAKE_CASE_: Tuple =768 SCREAMING_SNAKE_CASE_: Tuple =[3, 4, 18, 3] elif size == "b4": SCREAMING_SNAKE_CASE_: List[Any] =[64, 128, 320, 512] SCREAMING_SNAKE_CASE_: Union[str, Any] =768 SCREAMING_SNAKE_CASE_: List[Any] =[3, 8, 27, 3] elif size == "b5": SCREAMING_SNAKE_CASE_: Union[str, Any] =[64, 128, 320, 512] SCREAMING_SNAKE_CASE_: Optional[int] =768 SCREAMING_SNAKE_CASE_: str =[3, 6, 40, 3] else: raise ValueError(f'''Size {size} not supported''' ) # load image processor (only resize + normalize) SCREAMING_SNAKE_CASE_: List[Any] =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCamelCase_ , align=UpperCamelCase_ , do_random_crop=UpperCamelCase_ ) # prepare image SCREAMING_SNAKE_CASE_: str =prepare_img() SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).pixel_values logger.info(f'''Converting model {model_name}...''' ) # load original 
state dict if encoder_only: SCREAMING_SNAKE_CASE_: Dict =torch.load(UpperCamelCase_ , map_location=torch.device("""cpu""" ) ) else: SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.load(UpperCamelCase_ , map_location=torch.device("""cpu""" ) )["""state_dict"""] # rename keys SCREAMING_SNAKE_CASE_: Dict =rename_keys(UpperCamelCase_ , encoder_only=UpperCamelCase_ ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(UpperCamelCase_ , UpperCamelCase_ ) # create HuggingFace model and load state dict if encoder_only: SCREAMING_SNAKE_CASE_: Tuple =False SCREAMING_SNAKE_CASE_: Tuple =SegformerForImageClassification(UpperCamelCase_ ) else: SCREAMING_SNAKE_CASE_: Dict =SegformerForSemanticSegmentation(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() # forward pass SCREAMING_SNAKE_CASE_: int =model(UpperCamelCase_ ) SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": SCREAMING_SNAKE_CASE_: int =torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": SCREAMING_SNAKE_CASE_: Tuple =torch.tensor( [ [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": SCREAMING_SNAKE_CASE_: Tuple =torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": SCREAMING_SNAKE_CASE_: List[str] =torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": SCREAMING_SNAKE_CASE_: List[str] =torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, 
-17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": SCREAMING_SNAKE_CASE_: Dict =torch.tensor( [ [ [-1.13_72e01, -1.27_87e01, -1.34_77e01], [-1.25_36e01, -1.41_94e01, -1.44_09e01], [-1.32_17e01, -1.48_88e01, -1.53_27e01], ], [ [-1.47_91e01, -1.71_22e01, -1.82_77e01], [-1.71_63e01, -1.91_92e01, -1.95_33e01], [-1.78_97e01, -1.99_91e01, -2.03_15e01], ], [ [7.67_23e-01, 4.19_21e-01, -7.78_78e-02], [4.77_72e-01, 9.55_57e-03, -2.80_82e-01], [3.60_32e-01, -2.48_26e-01, -5.11_68e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": SCREAMING_SNAKE_CASE_: Optional[int] =torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": SCREAMING_SNAKE_CASE_: Any =torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": SCREAMING_SNAKE_CASE_: Dict =torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": SCREAMING_SNAKE_CASE_: Tuple =torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": 
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor( [ [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: SCREAMING_SNAKE_CASE_: List[str] =logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-2 ) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you\'d like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _UpperCAmelCase = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import collections import importlib.util import os import re from pathlib import Path a_ = '''src/transformers''' # Matches is_xxx_available() a_ = re.compile(r'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} a_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] a_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available a_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") a_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] a_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", a_ = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], a_ = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo a_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: a_ = re.compile(r'''^\s*try:''') # Catches a line with else: a_ = re.compile(r'''^\s*else:''') def _a ( UpperCamelCase_ : Union[str, Any] ) -> List[str]: """simple docstring""" if _re_test_backend.search(UpperCamelCase_ ) is None: return None lowerCAmelCase__ = [b[0] for b in _re_backend.findall(UpperCamelCase_ )] backends.sort() return "_and_".join(UpperCamelCase_ ) def _a ( UpperCamelCase_ : Optional[int] ) -> Tuple: """simple docstring""" with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = 0 while line_index < len(UpperCamelCase_ ) and not lines[line_index].startswith("_import_structure = {" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase__ = [] while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None: lowerCAmelCase__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase_ ): lowerCAmelCase__ = _re_one_line_import_struct.search(UpperCamelCase_ ).groups()[0] lowerCAmelCase__ = re.findall("\[([^\]]+)\]" , UpperCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", " )] ) line_index += 1 continue lowerCAmelCase__ = _re_import_struct_key_value.search(UpperCamelCase_ ) if single_line_import_search is not None: lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase_ ) > 0] objects.extend(UpperCamelCase_ ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase__ = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING" ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ): lowerCAmelCase__ = lines[line_index] if _re_import_struct_add_one.search(UpperCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(UpperCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCamelCase_ ) is not None: lowerCAmelCase__ = _re_import_struct_add_many.search(UpperCamelCase_ ).groups()[0].split(", " ) lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0] objects.extend(UpperCamelCase_ ) elif _re_between_brackets.search(UpperCamelCase_ ) is not None: lowerCAmelCase__ = _re_between_brackets.search(UpperCamelCase_ ).groups()[0].split(", " ) lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0] objects.extend(UpperCamelCase_ ) elif _re_quote_object.search(UpperCamelCase_ ) is not None: objects.append(_re_quote_object.search(UpperCamelCase_ ).groups()[0] ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) elif line.startswith(" " * 12 + "\"" ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase__ = [] while ( line_index < len(UpperCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("else" ) ): lowerCAmelCase__ = lines[line_index] lowerCAmelCase__ = _re_import.search(UpperCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase__ = {"none": objects} # Let's continue with backend-specific objects while line_index < len(UpperCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ): lowerCAmelCase__ = lines[line_index] lowerCAmelCase__ = _re_import.search(UpperCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> str: """simple docstring""" def find_duplicates(UpperCamelCase_ : str ): return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase__ = [] for key in import_dict_objects.keys(): lowerCAmelCase__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" ) lowerCAmelCase__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase__ = "base imports" if key == "none" else F"{key} backend" errors.append(F"Differences for {name}:" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F" {a} in TYPE_HINT but not in _import_structure." ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F" {a} in _import_structure but not in TYPE_HINT." ) return errors def _a ( ) -> List[Any]: """simple docstring""" lowerCAmelCase__ = [] for root, _, files in os.walk(UpperCamelCase_ ): if "__init__.py" in files: lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "__init__.py" ) lowerCAmelCase__ = parse_init(UpperCamelCase_ ) if objects is not None: lowerCAmelCase__ = analyze_results(*UpperCamelCase_ ) if len(UpperCamelCase_ ) > 0: lowerCAmelCase__ = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append("\n".join(UpperCamelCase_ ) ) if len(UpperCamelCase_ ) > 0: raise ValueError("\n\n".join(UpperCamelCase_ ) ) def _a ( ) -> str: """simple docstring""" lowerCAmelCase__ = [] for path, directories, files in os.walk(UpperCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("_" ): directories.remove(UpperCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCamelCase_ ) / folder).glob("*.py" ) ) ) == 0: continue lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / folder).relative_to(UpperCamelCase_ ) ) lowerCAmelCase__ = short_path.replace(os.path.sep , "." ) submodules.append(UpperCamelCase_ ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / fname).relative_to(UpperCamelCase_ ) ) lowerCAmelCase__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." 
) if len(submodule.split("." ) ) == 1: submodules.append(UpperCamelCase_ ) return submodules a_ = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def _a ( ) -> int: """simple docstring""" lowerCAmelCase__ = importlib.util.spec_from_file_location( "transformers" , os.path.join(UpperCamelCase_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowerCAmelCase__ = spec.loader.load_module() lowerCAmelCase__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(UpperCamelCase_ ) > 0: lowerCAmelCase__ = "\n".join(F"- {module}" for module in module_not_registered ) raise ValueError( "The following submodules are not properly registered in the main init of Transformers:\n" F"{list_of_modules}\n" "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." ) if __name__ == "__main__": check_all_inits() check_submodules()
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=_UpperCAmelCase ): """simple docstring""" __a = ["""torch""", """torchsde"""] def __init__( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : int ): '''simple docstring''' requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def lowerCamelCase__ ( cls : Optional[int] , *UpperCamelCase : Tuple , **UpperCamelCase : int ): '''simple docstring''' requires_backends(cls , ["""torch""", """torchsde"""] )
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the gamma function of `num` by numerically integrating its defining integral."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
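# Editor's sketch (not part of the original file): the quadrature-based gamma
# above can be cross-checked against the closed-form math.gamma, which computes
# the same function for positive reals.
assert abs(gamma(5.0) - math.gamma(5.0)) < 1e-6  # both give 24.0
assert abs(gamma(0.5) - math.gamma(0.5)) < 1e-4  # both give sqrt(pi)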
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count the perimeters below `limit` that form exactly one integer right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
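# Editor's sketch (not part of the original file): with coprime m > n of
# opposite parity, Euclid's formula yields the primitive triple
# (m^2 - n^2, 2mn, m^2 + n^2) with perimeter 2m(m + n), which is the step size
# used when seeding `frequencies` above.
m, n = 2, 1
assert (m * m - n * n, 2 * m * n, m * m + n * n) == (3, 4, 5)
assert 2 * m * (m + n) == 3 + 4 + 5  # perimeter 12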
"""
A simple launcher script for TPU training, in the spirit of torch.distributed.launch.
"""

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
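# Editor's sketch (illustrative, not part of the launcher above): a typical
# invocation, assuming the training script defines an `_mp_fn(index)` entry
# point for `xmp.spawn` to call on each TPU core; the file name is hypothetical:
#
#     python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5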
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase__ ( _UpperCAmelCase ): a_ =["""image_processor""", """tokenizer"""] a_ ="""LayoutLMv2ImageProcessor""" a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple: '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCAmelCase , ) lowerCAmelCase__ = kwargs.pop("feature_extractor" ) lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding: '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes " "if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." 
) # first, apply the image processor lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase__ = features["words"] lowerCAmelCase__ = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , ) # add pixel values lowerCAmelCase__ = features.pop("pixel_values" ) if return_overflowing_tokens is True: lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] ) lowerCAmelCase__ = images return encoded_inputs def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str: '''simple docstring''' lowerCAmelCase__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__UpperCAmelCase ) != len(__UpperCAmelCase ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" ) return images_with_overflow def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]: '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict: '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def UpperCAmelCase ( self )-> Optional[int]: '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def UpperCAmelCase ( self )-> Union[str, Any]: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self )-> str: '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , ) return self.image_processor
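A hedged usage sketch for the processor above; the checkpoint name and file path are illustrative, and OCR via the image processor requires Tesseract/pytesseract to be installed:

from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")
# With apply_ocr=True (the image processor default), words and boxes are
# extracted automatically, so only the image needs to be passed.
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # e.g. input_ids, attention_mask, bbox, image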
340
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
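With the lazy-module wiring above, names resolve on first attribute access; a small sketch (assumes a transformers install with the torch backend available):

from transformers import MT5Config

config = MT5Config()
print(config.model_type)  # "mt5"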
24
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class lowercase__ ( unittest.TestCase ): def UpperCAmelCase ( self )-> Dict: '''simple docstring''' lowerCAmelCase__ = tempfile.mkdtemp() # fmt: off lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowerCAmelCase__ = {"unk_token": "<unk>"} lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__UpperCAmelCase ) ) lowerCAmelCase__ = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase ) def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" 
, **__UpperCAmelCase ) def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]: '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def UpperCAmelCase ( self )-> Any: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self )-> int: '''simple docstring''' lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase ( self )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = self.get_rust_tokenizer() lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase ) def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase ) lowerCAmelCase__ = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def UpperCAmelCase ( self )-> List[str]: '''simple docstring''' lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" ) lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCAmelCase ( self )-> Dict: '''simple docstring''' lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = 
OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ = "lower newer" lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" ) lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def UpperCAmelCase ( self )-> int: '''simple docstring''' lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ = "lower newer" lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def UpperCAmelCase ( self )-> Any: '''simple docstring''' lowerCAmelCase__ = "google/owlvit-base-patch32" lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ = ["cat", "nasa badge"] lowerCAmelCase__ = processor(text=__UpperCAmelCase ) lowerCAmelCase__ = 16 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def UpperCAmelCase ( self )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = "google/owlvit-base-patch32" lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]] lowerCAmelCase__ = processor(text=__UpperCAmelCase ) lowerCAmelCase__ = 16 lowerCAmelCase__ = len(__UpperCAmelCase ) lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def UpperCAmelCase ( self )-> str: '''simple docstring''' lowerCAmelCase__ = "google/owlvit-base-patch32" lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ = ["cat", "nasa badge"] lowerCAmelCase__ = processor(text=__UpperCAmelCase ) lowerCAmelCase__ = 16 lowerCAmelCase__ = inputs["input_ids"] lowerCAmelCase__ = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def UpperCAmelCase ( self )-> List[str]: '''simple docstring''' lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() 
def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase ) lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
340
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor _lowerCAmelCase :Any = logging.get_logger(__name__) class _UpperCAmelCase ( _UpperCAmelCase ): '''simple docstring''' def __init__( self , *A , **A ) -> None: warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
263
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Solve a*x^2 + b*x + c = 0 and return the two roots."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    # Return plain floats when the roots are real.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
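A quick sanity check of the solver above, covering both a real-root and a complex-root case:

# x^2 - 3x + 2 = 0 has real roots, so plain floats come back.
print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0)
# x^2 + 1 = 0 has purely imaginary roots, so complex numbers come back.
print(quadratic_roots(a=1, b=0, c=1))   # (1j, -1j)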
340
0
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
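The three checks above assume a node type with `val` and `next` attributes that the file itself never defines; a minimal sketch for trying them out (the class and builder below are assumptions, not part of the original):

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    # Chain the values into a singly linked list and return its head.
    head = current = ListNode(values[0])
    for value in values[1:]:
        current.next = ListNode(value)
        current = current.next
    return head


print(is_palindrome(build_list([1, 2, 2, 1])))     # True
print(is_palindrome_stack(build_list([1, 2, 3])))  # False
print(is_palindrome_dict(build_list([1, 2, 1])))   # True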
157
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
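Assuming qiskit with the Aer simulator is installed, the transform above can be sampled directly; applied to the all-zeros register it yields a near-uniform distribution over basis states:

counts = quantum_fourier_transform(2)
# With 10_000 shots, each of '00', '01', '10', '11' lands near 2_500.
print(counts)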
340
0
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional movement-pruning parameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
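A minimal instantiation sketch showing the pruning-specific knobs this config adds on top of the standard BERT fields:

config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.hidden_size)     # 768, inherited BERT-style default
print(config.pruning_method)  # "topK"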
68
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowercase__ ( _UpperCAmelCase ): a_ ="""char""" a_ ="""bpe""" a_ ="""wp""" a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowercase__ ( _UpperCAmelCase ): a_ =["""image_processor""", """char_tokenizer"""] a_ ="""ViTImageProcessor""" a_ ="""MgpstrTokenizer""" def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str: '''simple docstring''' lowerCAmelCase__ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCAmelCase , ) lowerCAmelCase__ = kwargs.pop("feature_extractor" ) lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) lowerCAmelCase__ = tokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" ) lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]: '''simple docstring''' if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None: lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: lowerCAmelCase__ = encodings["input_ids"] return inputs def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences lowerCAmelCase__ = char_preds.size(0 ) lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" ) lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" ) lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" ) lowerCAmelCase__ = [] lowerCAmelCase__ = [] for i in range(__UpperCAmelCase ): lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]] lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]] lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) lowerCAmelCase__ = {} lowerCAmelCase__ = final_strs lowerCAmelCase__ = final_scores lowerCAmelCase__ = char_strs lowerCAmelCase__ = bpe_strs lowerCAmelCase__ = wp_strs return out def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]: '''simple docstring''' if format == DecodeType.CHARACTER: lowerCAmelCase__ = self.char_decode lowerCAmelCase__ = 1 lowerCAmelCase__ = "[s]" elif format == DecodeType.BPE: lowerCAmelCase__ = self.bpe_decode lowerCAmelCase__ = 2 lowerCAmelCase__ = "#" elif format == DecodeType.WORDPIECE: lowerCAmelCase__ = self.wp_decode lowerCAmelCase__ = 102 lowerCAmelCase__ = "[SEP]" else: raise ValueError(F"Format {format} is not supported." 
) lowerCAmelCase__ , lowerCAmelCase__ = [], [] lowerCAmelCase__ = pred_logits.size(0 ) lowerCAmelCase__ = pred_logits.size(1 ) lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase ) lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:] lowerCAmelCase__ = decoder(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 ) lowerCAmelCase__ = preds_max_prob[:, 1:] for index in range(__UpperCAmelCase ): lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase ) lowerCAmelCase__ = preds_str[index][:pred_eos] lowerCAmelCase__ = preds_index[index].cpu().tolist() lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1 lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1] lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__UpperCAmelCase ) conf_scores.append(__UpperCAmelCase ) return dec_strs, conf_scores def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]: '''simple docstring''' lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]: '''simple docstring''' return self.bpe_tokenizer.batch_decode(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs
340
0
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() snake_case : List[str] = [ '''word_embeddings_layernorm.weight''', '''word_embeddings_layernorm.bias''', '''input_layernorm.weight''', '''input_layernorm.bias''', '''post_attention_layernorm.weight''', '''post_attention_layernorm.bias''', '''self_attention.dense.bias''', '''mlp.dense_4h_to_h.bias''', '''ln_f.weight''', '''ln_f.bias''', ] snake_case : Optional[Any] = [ '''mlp.dense_4h_to_h.weight''', '''self_attention.dense.weight''', ] def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] ): """simple docstring""" a :Optional[Any] = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks a :Union[str, Any] = int(re.match(R'''.*layer_(\d*).*''' , UpperCamelCase_ )[1] ) layer_number -= 3 return F'''h.{layer_number}.''' + key def __lowerCamelCase ( UpperCAmelCase_ : Tuple ): """simple docstring""" if dtype == torch.bool: return 1 / 8 a :List[Any] = re.search(R'''[^\d](\d+)$''' , str(UpperCamelCase_ ) ) if bit_search is None: raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' ) a :Any = int(bit_search.groups()[0] ) return bit_size // 8 def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ): """simple docstring""" if bloom_config_file == "": a :List[Any] = BloomConfig() else: a :Any = BloomConfig.from_json_file(UpperCamelCase_ ) if shard_model: a :Optional[Any] = os.listdir(UpperCamelCase_ ) a :Optional[int] = sorted(filter(lambda UpperCAmelCase_ : s.startswith('''layer''' ) and "model_00" in s , UpperCamelCase_ ) ) a :List[Any] = {'''weight_map''': {}, '''metadata''': {}} a :Any = 0 a :List[Any] = None a :List[str] = BloomConfig() for j, file in enumerate(UpperCamelCase_ ): print('''Processing file: {}'''.format(UpperCamelCase_ ) ) a :List[Any] = None for i in range(UpperCamelCase_ ): # load all TP files a :Tuple = file.replace('''model_00''' , F'''model_0{i}''' ) a :Tuple = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , map_location='''cpu''' ) # Rename keys in the transformers names a :Optional[int] = list(temp.keys() ) for key in keys: a :Optional[Any] = temp.pop(UpperCamelCase_ ) if tensors is None: a :List[str] = temp else: for key in tensors.keys(): if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel a :Any = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks a :str = torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): a :Tuple = 
tensors[key] / pretraining_tp torch.save( UpperCamelCase_ , os.path.join( UpperCamelCase_ , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): a :str = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: a :str = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) ) a :int = BloomConfig() a :List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME a :int = total_size with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(UpperCamelCase_ , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: a :Tuple = json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + '''\n''' f.write(UpperCamelCase_ ) else: a :Union[str, Any] = BloomModel(UpperCamelCase_ ) a :Dict = os.listdir(UpperCamelCase_ ) a :List[Any] = sorted(filter(lambda UpperCAmelCase_ : s.startswith('''layer''' ) and "model_00" in s , UpperCamelCase_ ) ) a :Union[str, Any] = None for i, file in enumerate(UpperCamelCase_ ): a :Optional[Any] = None for i in range(UpperCamelCase_ ): # load all TP files a :Union[str, Any] = file.replace('''model_00''' , F'''model_0{i}''' ) a :List[str] = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , map_location='''cpu''' ) # Rename keys in the transformers names a :List[Any] = list(temp.keys() ) for key in keys: a :List[str] = temp.pop(UpperCamelCase_ ) if tensors is None: a :Tuple = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel a :Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks a :Optional[int] = torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): a :Optional[int] = tensors[key] / pretraining_tp a :List[Any] = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ ) assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: a :Optional[Any] = set(other_keys.missing_keys ) else: a :List[str] = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) a :Optional[int] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME a :str = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: a :Tuple = model.to(config.torch_dtype ) torch.save(model.state_dict() , UpperCamelCase_ ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": snake_case : int = argparse.ArgumentParser() # Required 
parameters parser.add_argument( '''--bloom_checkpoint_path''', default=None, type=str, required=True, help='''Path to the Megatron-LM checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--bloom_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--shard_model''', action='''store_true''', help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''', ) parser.add_argument( '''--pretraining_tp''', default=4, type=int, help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''', ) snake_case : List[Any] = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
94
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
340
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { 'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class __A (_UpperCAmelCase): '''simple docstring''' __lowercase: List[Any] = """donut-swin""" __lowercase: List[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[Any] , UpperCAmelCase_ : List[Any]=224 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[int]=96 , UpperCAmelCase_ : Optional[int]=[2, 2, 6, 2] , UpperCAmelCase_ : int=[3, 6, 12, 24] , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : Optional[Any]=4.0 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Tuple=1E-5 , **UpperCAmelCase_ : Any , ) ->int: """simple docstring""" super().__init__(**__UpperCAmelCase ) snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = embed_dim snake_case_ = depths snake_case_ = len(__UpperCAmelCase ) snake_case_ = num_heads snake_case_ = window_size snake_case_ = mlp_ratio snake_case_ = qkv_bias snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = drop_path_rate snake_case_ = hidden_act snake_case_ = use_absolute_embeddings snake_case_ = layer_norm_eps snake_case_ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case_ = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
347
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # an even-sized subtree can be cut away from its parent
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
340
0
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { '''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''', '''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''', '''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''', } class lowerCamelCase_ ( _UpperCAmelCase ): '''simple docstring''' a__ : int = """owlvit_text_model""" def __init__( self , __lowercase=49_408 , __lowercase=512 , __lowercase=2_048 , __lowercase=12 , __lowercase=8 , __lowercase=16 , __lowercase="quick_gelu" , __lowercase=1E-5 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=0 , __lowercase=49_406 , __lowercase=49_407 , **__lowercase , ) -> Optional[Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) __UpperCamelCase :Optional[int] = vocab_size __UpperCamelCase :int = hidden_size __UpperCamelCase :List[str] = intermediate_size __UpperCamelCase :Dict = num_hidden_layers __UpperCamelCase :str = num_attention_heads __UpperCamelCase :List[Any] = max_position_embeddings __UpperCamelCase :Union[str, Any] = hidden_act __UpperCamelCase :Optional[int] = layer_norm_eps __UpperCamelCase :Optional[Any] = attention_dropout __UpperCamelCase :str = initializer_range __UpperCamelCase :Any = initializer_factor @classmethod def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig": cls._set_token_in_kwargs(__UpperCAmelCase) __UpperCamelCase , __UpperCamelCase :List[str] = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''') == "owlvit": __UpperCamelCase :Any = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase) class lowerCamelCase_ ( _UpperCAmelCase ): '''simple docstring''' a__ : Union[str, Any] = """owlvit_vision_model""" def __init__( self , __lowercase=768 , __lowercase=3_072 , __lowercase=12 , __lowercase=12 , __lowercase=3 , __lowercase=768 , __lowercase=32 , __lowercase="quick_gelu" , __lowercase=1E-5 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , **__lowercase , ) -> List[str]: super().__init__(**__UpperCAmelCase) __UpperCamelCase :Optional[int] = hidden_size __UpperCamelCase :Any = intermediate_size __UpperCamelCase :List[Any] = num_hidden_layers __UpperCamelCase :str = num_attention_heads __UpperCamelCase :List[Any] = num_channels __UpperCamelCase :Tuple = image_size __UpperCamelCase :Dict = patch_size __UpperCamelCase :Any = hidden_act __UpperCamelCase :Union[str, Any] = layer_norm_eps __UpperCamelCase :int = attention_dropout __UpperCamelCase :Any = initializer_range __UpperCamelCase :Dict = initializer_factor @classmethod def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig": cls._set_token_in_kwargs(__UpperCAmelCase) __UpperCamelCase , __UpperCamelCase :Dict = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''') == "owlvit": __UpperCamelCase :int = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase) class lowerCamelCase_ ( _UpperCAmelCase ): '''simple docstring''' a__ : Optional[int] = """owlvit""" a__ : int = True def __init__( self , __lowercase=None , __lowercase=None , __lowercase=512 , __lowercase=2.65_92 , __lowercase=True , **__lowercase , ) -> List[str]: super().__init__(**__UpperCAmelCase) if text_config is None: __UpperCamelCase :Any = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''') if vision_config is None: __UpperCamelCase :Optional[int] = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''') __UpperCamelCase :Union[str, Any] = OwlViTTextConfig(**__UpperCAmelCase) __UpperCamelCase :Dict = OwlViTVisionConfig(**__UpperCAmelCase) __UpperCamelCase :int = projection_dim __UpperCamelCase :Optional[int] = logit_scale_init_value __UpperCamelCase :Optional[Any] = return_dict __UpperCamelCase :Optional[int] = 1.0 @classmethod def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> "PretrainedConfig": cls._set_token_in_kwargs(__UpperCAmelCase) __UpperCamelCase , __UpperCamelCase :str = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase) if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase) @classmethod def UpperCamelCase__ ( cls , __lowercase , __lowercase , **__lowercase) -> Union[str, Any]: __UpperCamelCase :List[Any] = {} __UpperCamelCase :str = text_config __UpperCamelCase :Dict = vision_config return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase) def UpperCamelCase__ ( self) -> Any: __UpperCamelCase :Union[str, Any] = copy.deepcopy(self.__dict__) __UpperCamelCase :List[Any] = self.text_config.to_dict() __UpperCamelCase :str = self.vision_config.to_dict() __UpperCamelCase :Optional[Any] = self.__class__.model_type return output class lowerCamelCase_ ( _UpperCAmelCase ): '''simple docstring''' @property def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ]) @property def UpperCamelCase__ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ]) @property def UpperCamelCase__ ( self) -> float: return 1E-4 def UpperCamelCase__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = None , ) -> Mapping[str, Any]: __UpperCamelCase :List[str] = super().generate_dummy_inputs( processor.tokenizer , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , framework=__UpperCAmelCase) __UpperCamelCase :Dict = super().generate_dummy_inputs( processor.image_processor , batch_size=__UpperCAmelCase , framework=__UpperCAmelCase) return {**text_input_dict, **image_input_dict} @property def UpperCamelCase__ ( self) -> int: return 14
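A small sketch confirming the composite defaults in the config classes above (assuming they keep their upstream transformers names): with no sub-configs supplied, the text and vision towers fall back to their own defaults.

config = OwlViTConfig()
print(config.text_config.hidden_size)    # 512
print(config.vision_config.hidden_size)  # 768
print(config.projection_dim)             # 512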
43
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
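The CSS class used above is tied to Yahoo's current markup and breaks whenever the page layout changes; a defensive wrapper sketch (the fallback message is illustrative, not from the original):

def safe_stock_price(symbol: str) -> str:
    try:
        return stock_price(symbol)
    except AttributeError:
        # soup.find returned None, so the expected element is gone.
        return "price element not found - page layout has likely changed"


print(safe_stock_price("MSFT"))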
340
0
"""simple docstring""" import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class a ( _UpperCAmelCase ): def __init__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str=13 , lowerCAmelCase : Tuple=7 , lowerCAmelCase : Any=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : int=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : Tuple=99 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : Dict=5 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : List[str]=37 , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[Any]=512 , lowerCAmelCase : List[str]=16 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Optional[Any]=0.0_2 , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Optional[int]="None" , lowerCAmelCase : str=3 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : List[Any]=None , ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: str =parent SCREAMING_SNAKE_CASE_: str =batch_size SCREAMING_SNAKE_CASE_: Dict =seq_length SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training SCREAMING_SNAKE_CASE_: Any =use_input_mask SCREAMING_SNAKE_CASE_: Dict =use_token_type_ids SCREAMING_SNAKE_CASE_: Tuple =use_labels SCREAMING_SNAKE_CASE_: Tuple =vocab_size SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_size SCREAMING_SNAKE_CASE_: Dict =num_hidden_layers SCREAMING_SNAKE_CASE_: Tuple =num_attention_heads SCREAMING_SNAKE_CASE_: int =intermediate_size SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_act SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob SCREAMING_SNAKE_CASE_: Any =attention_probs_dropout_prob SCREAMING_SNAKE_CASE_: Tuple =max_position_embeddings SCREAMING_SNAKE_CASE_: Any =type_vocab_size SCREAMING_SNAKE_CASE_: Optional[int] =type_sequence_label_size SCREAMING_SNAKE_CASE_: Dict =initializer_range SCREAMING_SNAKE_CASE_: Union[str, Any] =num_labels SCREAMING_SNAKE_CASE_: Union[str, Any] =num_choices SCREAMING_SNAKE_CASE_: Tuple =relative_attention SCREAMING_SNAKE_CASE_: Tuple =position_biased_input SCREAMING_SNAKE_CASE_: int =pos_att_type SCREAMING_SNAKE_CASE_: List[str] =scope def lowerCamelCase__ ( self : int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_: List[Any] =None if self.use_input_mask: SCREAMING_SNAKE_CASE_: Any =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) SCREAMING_SNAKE_CASE_: Dict =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_: Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE_: List[str] =None SCREAMING_SNAKE_CASE_: Any =None SCREAMING_SNAKE_CASE_: Any =None if self.use_labels: SCREAMING_SNAKE_CASE_: Any =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_: int 
=ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_: Tuple =ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE_: Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : List[Any] ) -> str: '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowerCamelCase__ ( self : Dict , lowerCAmelCase : List[Any] ) -> Tuple: '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[int] =DebertaVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_: Union[str, Any] =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0] SCREAMING_SNAKE_CASE_: Union[str, Any] =model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0] SCREAMING_SNAKE_CASE_: Any =model(__UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: str =DebertaVaForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_: Any =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : str ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict =self.num_labels SCREAMING_SNAKE_CASE_: Optional[int] =DebertaVaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_: Dict =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__UpperCAmelCase ) def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : 
Union[str, Any] , lowerCAmelCase : List[Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_: str =self.num_labels SCREAMING_SNAKE_CASE_: Optional[int] =DebertaVaForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_: str =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : str ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[Any] =DebertaVaForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_: Tuple =model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: str =DebertaVaForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_: Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_: List[str] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_: Dict =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_: List[Any] =model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Union[str, Any] =self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ): Union[str, Any] =config_and_inputs SCREAMING_SNAKE_CASE_: List[Any] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): UpperCamelCase : Any = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase : Any = ( { 'feature-extraction': DebertaVaModel, 'fill-mask': DebertaVaForMaskedLM, 'question-answering': DebertaVaForQuestionAnswering, 'text-classification': DebertaVaForSequenceClassification, 'token-classification': DebertaVaForTokenClassification, 'zero-shot': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase : Tuple = True UpperCamelCase : Union[str, Any] = False UpperCamelCase : Tuple = False 
UpperCamelCase : List[Any] = False UpperCamelCase : Any = False def lowerCamelCase__ ( self : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[int] =DebertaVaModelTester(self ) SCREAMING_SNAKE_CASE_: Optional[Any] =ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCamelCase__ ( self : Tuple ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase ) def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase ) def lowerCamelCase__ ( self : str ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase ) def lowerCamelCase__ ( self : Any ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase ) def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase ) def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__UpperCAmelCase ) @slow def lowerCamelCase__ ( self : Any ) -> Optional[int]: '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_: int =DebertaVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): @unittest.skip(reason="""Model not available yet""" ) def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' pass @slow def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict =DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE_: Union[str, Any] =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] # compare the actual values for a slice. SCREAMING_SNAKE_CASE_: List[str] =torch.tensor( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
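# Illustrative usage sketch (not part of the test file above), assuming the
# canonical transformers names DebertaV2Config/DebertaV2Model: the tester's
# shape check boils down to a tiny-config forward pass like this.
import torch
from transformers import DebertaV2Config, DebertaV2Model

tiny_config = DebertaV2Config(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
tiny_model = DebertaV2Model(tiny_config).eval()
input_ids = torch.randint(0, tiny_config.vocab_size, (2, 7))  # (batch_size, seq_length)
with torch.no_grad():
    sequence_output = tiny_model(input_ids)[0]
assert sequence_output.shape == (2, 7, tiny_config.hidden_size)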
173
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order at the head so the list ends up ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
340
0
"""simple docstring""" import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : int = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} UpperCAmelCase : str = { 'vocab_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt', }, 'emoji_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json', }, } UpperCAmelCase : Dict = { 'abeja/gpt-neox-japanese-2.7b': 2048, } def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) -> Tuple: '''simple docstring''' with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" ) as f: __UpperCAmelCase : Union[str, Any] = json.loads(f.read() ) __UpperCAmelCase : Optional[int] = collections.OrderedDict() __UpperCAmelCase : Optional[int] = collections.OrderedDict() __UpperCAmelCase : List[Any] = collections.OrderedDict() with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" ) as f: __UpperCAmelCase : str = f.readlines() __UpperCAmelCase : str = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(UpperCamelCase_ ): __UpperCAmelCase : int = b __UpperCAmelCase : Optional[Any] = idx for wd in b: __UpperCAmelCase : Tuple = idx return vocab, raw_vocab, ids_to_tokens, emoji class lowerCamelCase__ ( _UpperCAmelCase ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = ["""input_ids""", """attention_mask"""] def __init__( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str="<|endoftext|>" , UpperCamelCase : Union[str, Any]="<|endoftext|>" , UpperCamelCase : int="<|startoftext|>" , UpperCamelCase : Optional[Any]="<|endoftext|>" , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : Optional[int] , ): '''simple docstring''' super().__init__( unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , do_clean_text=__UpperCAmelCase , **__UpperCAmelCase , ) if not os.path.isfile(__UpperCAmelCase ): raise ValueError( f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained''' """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) if not os.path.isfile(__UpperCAmelCase ): raise ValueError( f'''Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google''' """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) __UpperCAmelCase : List[str] = do_clean_text __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : str = load_vocab_and_emoji(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Dict = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return len(self.raw_vocab ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return self.subword_tokenizer.tokenize(__UpperCAmelCase , clean=self.do_clean_text ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str ): '''simple docstring''' return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token ) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(__UpperCAmelCase ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Any = """""".join(__UpperCAmelCase ).strip() return out_string def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : List[str] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: __UpperCAmelCase : Tuple = input_ids[-self.model_max_length :] return input_ids def lowerCamelCase__ ( self : str , UpperCamelCase : Dict , UpperCamelCase : List[str] = None ): '''simple docstring''' __UpperCAmelCase : Dict = 0 if os.path.isdir(__UpperCAmelCase ): __UpperCAmelCase : int = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : List[str] = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] ) else: __UpperCAmelCase : int = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : List[str] = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""] ) with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""" ) __UpperCAmelCase : Optional[int] = token_index writer.write(""",""".join(__UpperCAmelCase ) + """\n""" ) index += 1 with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer: json.dump(self.emoji , __UpperCAmelCase ) return vocab_file, emoji_file class lowerCamelCase__ ( _UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = vocab # same as swe __UpperCAmelCase : Optional[int] = ids_to_tokens # same as bpe __UpperCAmelCase : Optional[Any] = emoji __UpperCAmelCase : 
Optional[Any] = np.max([len(__UpperCAmelCase ) for w in self.vocab.keys()] ) __UpperCAmelCase : Union[str, Any] = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" ) __UpperCAmelCase : Any = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" ) __UpperCAmelCase : Tuple = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" ) __UpperCAmelCase : Union[str, Any] = re.compile( R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __UpperCAmelCase : Tuple = re.compile( R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __UpperCAmelCase : Tuple = re.compile( R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" ) __UpperCAmelCase : Optional[int] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿""" __UpperCAmelCase : Any = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟""" __UpperCAmelCase : Optional[Any] = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} ) def __len__( self : int ): '''simple docstring''' return len(self.ids_to_tokens ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Any = self.content_repattera.sub("""<URL>""" , __UpperCAmelCase ) __UpperCAmelCase : Tuple = self.content_repattera.sub("""<EMAIL>""" , __UpperCAmelCase ) __UpperCAmelCase : Tuple = self.content_repattera.sub("""<TEL>""" , __UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.content_repattera.sub("""<DATE>""" , __UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = self.content_repattera.sub("""<DATE>""" , __UpperCAmelCase ) __UpperCAmelCase : Dict = self.content_repattera.sub("""<PRICE>""" , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __UpperCAmelCase : Any = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" ) return content def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=False ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = text.replace(""" """ , """<SP>""" ) __UpperCAmelCase : Dict = text.replace(""" """ , """<SP>""" ) __UpperCAmelCase : Optional[Any] = text.replace("""\r\n""" , """<BR>""" ) __UpperCAmelCase : List[str] = text.replace("""\n""" , """<BR>""" ) __UpperCAmelCase : Optional[int] = text.replace("""\r""" , """<BR>""" ) __UpperCAmelCase : Optional[int] = text.replace("""\t""" , """<TAB>""" ) __UpperCAmelCase : Tuple = text.replace("""—""" , """ー""" ) __UpperCAmelCase : Optional[Any] = text.replace("""−""" , """ー""" ) for k, v in self.emoji["emoji"].items(): if k in text: __UpperCAmelCase : List[str] = text.replace(__UpperCAmelCase , __UpperCAmelCase ) if clean: __UpperCAmelCase : Optional[Any] = self.clean_text(__UpperCAmelCase ) def check_simbol(UpperCamelCase : Optional[Any] ): __UpperCAmelCase : Any = x.encode() if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 2: __UpperCAmelCase : Union[str, Any] = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xC2_A1 and c <= 0xC2_BF) or (c >= 0xC7_80 and c <= 0xC7_83) or (c >= 0xCA_B9 and c 
<= 0xCB_BF) or (c >= 0xCC_80 and c <= 0xCD_A2) ): return True return False def checkuae(UpperCamelCase : int ): __UpperCAmelCase : List[Any] = x.encode() if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 3: __UpperCAmelCase : Union[str, Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xE2_80_80 and c <= 0xE2_B0_7F: return True return False __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : List[Any] = [] while pos < len(__UpperCAmelCase ): __UpperCAmelCase : Dict = min(len(__UpperCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3 __UpperCAmelCase : Union[str, Any] = [] # (token_id, token, pos) for e in range(__UpperCAmelCase , __UpperCAmelCase , -1 ): __UpperCAmelCase : Dict = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__UpperCAmelCase ) > 2: __UpperCAmelCase : Any = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__UpperCAmelCase ) > 0: # the smallest token_id is adopted __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : int = sorted(__UpperCAmelCase , key=lambda UpperCamelCase : x[0] )[0] result.append(__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = e else: __UpperCAmelCase : Optional[int] = pos + 1 __UpperCAmelCase : Optional[int] = text[pos:end] if check_simbol(__UpperCAmelCase ): result.append("""<KIGOU>""" ) elif checkuae(__UpperCAmelCase ): result.append("""<U2000U2BFF>""" ) else: for i in wd.encode("""utf-8""" ): result.append("""<|byte%d|>""" % i ) __UpperCAmelCase : Optional[int] = end return result def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str="\n" ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : List[str] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__UpperCAmelCase ) > 0: words.append(bytearray(__UpperCAmelCase ).decode("""utf-8""" , errors="""replace""" ) ) __UpperCAmelCase : Optional[Any] = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word] ) elif word == "<SP>": words.append(""" """ ) elif word == "<BR>": words.append(__UpperCAmelCase ) elif word == "<TAB>": words.append("""\t""" ) elif word == "<BLOCK>": words.append("""▀""" ) elif word == "<KIGOU>": words.append("""ǀ""" ) elif word == "<U2000U2BFF>": words.append("""‖""" ) else: words.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: words.append(bytearray(__UpperCAmelCase ).decode("""utf-8""" , errors="""replace""" ) ) __UpperCAmelCase : Union[str, Any] = """""".join(__UpperCAmelCase ) return text
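# Hedged sketch of the byte fallback used above: when a span is not in the
# vocab, SubWordJapaneseTokenizer emits one "<|byte%d|>" token per UTF-8 byte,
# and the decoder buffers those bytes back into text. A minimal standalone
# version of that round trip:
def bytes_to_tokens(text: str) -> list:
    return ["<|byte%d|>" % b for b in text.encode("utf-8")]


def tokens_to_text(tokens: list) -> str:
    buf = bytearray(int(t[6:-2]) for t in tokens if t.startswith("<|byte") and t.endswith("|>"))
    return buf.decode("utf-8", errors="replace")


assert tokens_to_text(bytes_to_tokens("日本語")) == "日本語"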
115
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
340
0
import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder lowerCAmelCase_ = '''__DUMMY_TRANSFORMERS_USER__''' lowerCAmelCase_ = '''Dummy User''' lowerCAmelCase_ = '''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''' lowerCAmelCase_ = '''https://hub-ci.huggingface.co''' lowerCAmelCase_ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}''' lowerCAmelCase_ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}''' lowerCAmelCase_ = Path('''~/.huggingface/hub_ci_token''').expanduser() @pytest.fixture def lowerCamelCase_ ( _UpperCamelCase ) -> Dict: """simple docstring""" monkeypatch.setattr( '''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , UpperCamelCase_ ) @pytest.fixture def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple: """simple docstring""" monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , UpperCamelCase_ ) monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , UpperCamelCase_ ) @pytest.fixture def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , UpperCamelCase_ ) @pytest.fixture def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Dict: """simple docstring""" HfFolder.save_token(UpperCamelCase_ ) yield HfFolder.delete_token() @pytest.fixture(scope='''session''' ) def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" return HfApi(endpoint=UpperCamelCase_ ) @pytest.fixture(scope='''session''' ) def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case_ : Optional[int] = HfFolder.get_token() HfFolder.save_token(UpperCamelCase_ ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase_ ) @pytest.fixture def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" def _cleanup_repo(_UpperCamelCase ): hf_api.delete_repo(UpperCamelCase_ , token=UpperCamelCase_ , repo_type='''dataset''' ) return _cleanup_repo @pytest.fixture def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]: """simple docstring""" @contextmanager def _temporary_repo(_UpperCamelCase ): try: yield repo_id finally: cleanup_repo(UpperCamelCase_ ) return _temporary_repo @pytest.fixture(scope='''session''' ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" snake_case_ : Optional[int] = f'''repo_txt_data-{int(time.time() * 10E3 )}''' snake_case_ : Tuple = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(UpperCamelCase_ , token=UpperCamelCase_ , repo_type='''dataset''' , private=UpperCamelCase_ ) hf_api.upload_file( token=UpperCamelCase_ , path_or_fileobj=str(UpperCamelCase_ ) , path_in_repo='''data/text_data.txt''' , repo_id=UpperCamelCase_ , repo_type='''dataset''' , ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_ , token=UpperCamelCase_ , repo_type='''dataset''' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope='''session''' ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple: """simple docstring""" snake_case_ : List[str] = f'''repo_zipped_txt_data-{int(time.time() * 10E3 )}''' snake_case_ : Any = f'''{CI_HUB_USER}/{repo_name}''' 
hf_api.create_repo(UpperCamelCase_ , token=UpperCamelCase_ , repo_type='''dataset''' , private=UpperCamelCase_ ) hf_api.upload_file( token=UpperCamelCase_ , path_or_fileobj=str(UpperCamelCase_ ) , path_in_repo='''data.zip''' , repo_id=UpperCamelCase_ , repo_type='''dataset''' , ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_ , token=UpperCamelCase_ , repo_type='''dataset''' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope='''session''' ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: """simple docstring""" snake_case_ : Optional[Any] = f'''repo_zipped_img_data-{int(time.time() * 10E3 )}''' snake_case_ : Optional[Any] = f'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(UpperCamelCase_ , token=UpperCamelCase_ , repo_type='''dataset''' , private=UpperCamelCase_ ) hf_api.upload_file( token=UpperCamelCase_ , path_or_fileobj=str(UpperCamelCase_ ) , path_in_repo='''data.zip''' , repo_id=UpperCamelCase_ , repo_type='''dataset''' , ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_ , token=UpperCamelCase_ , repo_type='''dataset''' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: """simple docstring""" return hf_private_dataset_repo_zipped_img_data_
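# Illustrative sketch of the cleanup pattern the fixtures above implement: a
# context manager that yields a hub repo id and always deletes the repo
# afterwards (the name temporary_dataset_repo is hypothetical; the real
# fixtures are defined above).
from contextlib import contextmanager


@contextmanager
def temporary_dataset_repo(hf_api, repo_id, token):
    try:
        yield repo_id
    finally:
        hf_api.delete_repo(repo_id, token=token, repo_type="dataset")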
279
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py a_ = '''src/transformers''' a_ = '''docs/source/en/tasks''' def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple: """simple docstring""" with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f: lowerCAmelCase__ = f.readlines() # Find the start prompt. lowerCAmelCase__ = 0 while not lines[start_index].startswith(UpperCamelCase_ ): start_index += 1 start_index += 1 lowerCAmelCase__ = start_index while not lines[end_index].startswith(UpperCamelCase_ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. a_ = direct_transformers_import(TRANSFORMERS_PATH) a_ = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
a_ = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide] lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() ) lowerCAmelCase__ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n" def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file( filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , ) lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ ) if current_list != new_list: if overwrite: with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`" " to fix this." ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') a_ = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
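# Toy sketch of the prompt-delimited extraction performed by the helper above:
# find the lines between a start prompt and an end prompt (fake data, not a
# real task guide).
toy_lines = ["intro\n", "<!--start-->\n", "model list\n", "<!--end-->\n", "outro\n"]
start = next(i for i, line in enumerate(toy_lines) if line.startswith("<!--start-->")) + 1
end = next(i for i, line in enumerate(toy_lines) if line.startswith("<!--end-->"))
assert "".join(toy_lines[start:end]) == "model list\n"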
340
0
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __A : Dict = (720, 1280) # Height, Width __A : str = (0.4, 0.6) # if height or width lower than this scale, drop it. __A : Optional[int] = 1 / 100 __A : Dict = "" __A : List[str] = "" __A : int = "" __A : str = 250 def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_, lowerCAmelCase_ : Dict = get_dataset(UpperCamelCase_ , UpperCamelCase_ ) for index in range(UpperCamelCase_ ): lowerCAmelCase_ : List[str] = random.sample(range(len(UpperCamelCase_ ) ) , 4 ) lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : str = update_image_and_anno( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , filter_scale=UpperCamelCase_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowerCAmelCase_ : int = random_chars(32 ) lowerCAmelCase_ : Optional[Any] = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0] lowerCAmelCase_ : Dict = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , UpperCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) lowerCAmelCase_ : Optional[int] = [] for anno in new_annos: lowerCAmelCase_ : Optional[int] = anno[3] - anno[1] lowerCAmelCase_ : Optional[Any] = anno[4] - anno[2] lowerCAmelCase_ : Tuple = anno[1] + width / 2 lowerCAmelCase_ : List[str] = anno[2] + height / 2 lowerCAmelCase_ : str = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(UpperCamelCase_ ) with open(f'{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def UpperCamelCase_ ( A__ : str , A__ : str ): '''simple docstring''' lowerCAmelCase_ : Optional[int] = [] lowerCAmelCase_ : str = [] for label_file in glob.glob(os.path.join(UpperCamelCase_ , """*.txt""" ) ): lowerCAmelCase_ : Any = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(UpperCamelCase_ ) as in_file: lowerCAmelCase_ : Optional[Any] = in_file.readlines() lowerCAmelCase_ : Any = os.path.join(UpperCamelCase_ , f'{label_name}.jpg' ) lowerCAmelCase_ : Any = [] for obj_list in obj_lists: lowerCAmelCase_ : Tuple = obj_list.rstrip("""\n""" ).split(""" """ ) lowerCAmelCase_ : Optional[int] = float(obj[1] ) - float(obj[3] ) / 2 lowerCAmelCase_ : Tuple = float(obj[2] ) - float(obj[4] ) / 2 lowerCAmelCase_ : List[Any] = float(obj[1] ) + float(obj[3] ) / 2 lowerCAmelCase_ : Union[str, Any] = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(UpperCamelCase_ ) labels.append(UpperCamelCase_ ) return img_paths, labels def UpperCamelCase_ ( A__ : list , A__ : list , A__ : list[int] , A__ : tuple[int, int] , A__ : tuple[float, float] , A__ : float = 0.0 , ): '''simple docstring''' lowerCAmelCase_ : Dict = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) lowerCAmelCase_ : Any = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowerCAmelCase_ : int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowerCAmelCase_ : Optional[Any] = int(scale_x * output_size[1] ) lowerCAmelCase_ : List[Any] = int(scale_y * output_size[0] ) lowerCAmelCase_ : List[Any] = [] lowerCAmelCase_ : Optional[int] = [] for i, index in enumerate(UpperCamelCase_ ): lowerCAmelCase_ : Tuple = all_img_list[index] path_list.append(UpperCamelCase_ ) lowerCAmelCase_ : Any = all_annos[index] lowerCAmelCase_ : 
Union[str, Any] = cva.imread(UpperCamelCase_ ) if i == 0: # top-left lowerCAmelCase_ : Tuple = cva.resize(UpperCamelCase_ , (divid_point_x, divid_point_y) ) lowerCAmelCase_ : List[Any] = img for bbox in img_annos: lowerCAmelCase_ : int = bbox[1] * scale_x lowerCAmelCase_ : List[str] = bbox[2] * scale_y lowerCAmelCase_ : str = bbox[3] * scale_x lowerCAmelCase_ : Tuple = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right lowerCAmelCase_ : Union[str, Any] = cva.resize(UpperCamelCase_ , (output_size[1] - divid_point_x, divid_point_y) ) lowerCAmelCase_ : Union[str, Any] = img for bbox in img_annos: lowerCAmelCase_ : List[str] = scale_x + bbox[1] * (1 - scale_x) lowerCAmelCase_ : int = bbox[2] * scale_y lowerCAmelCase_ : str = scale_x + bbox[3] * (1 - scale_x) lowerCAmelCase_ : List[Any] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left lowerCAmelCase_ : int = cva.resize(UpperCamelCase_ , (divid_point_x, output_size[0] - divid_point_y) ) lowerCAmelCase_ : Dict = img for bbox in img_annos: lowerCAmelCase_ : Dict = bbox[1] * scale_x lowerCAmelCase_ : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y) lowerCAmelCase_ : int = bbox[3] * scale_x lowerCAmelCase_ : Dict = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right lowerCAmelCase_ : Optional[int] = cva.resize( UpperCamelCase_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) lowerCAmelCase_ : int = img for bbox in img_annos: lowerCAmelCase_ : Dict = scale_x + bbox[1] * (1 - scale_x) lowerCAmelCase_ : List[str] = scale_y + bbox[2] * (1 - scale_y) lowerCAmelCase_ : List[str] = scale_x + bbox[3] * (1 - scale_x) lowerCAmelCase_ : Dict = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: lowerCAmelCase_ : str = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def UpperCamelCase_ ( A__ : int ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" lowerCAmelCase_ : int = ascii_lowercase + digits return "".join(random.choice(UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) ) if __name__ == "__main__": main() print("DONE ✅")
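# Sketch of the bbox round trip in main() above: labels are read as normalized
# corners (xmin, ymin, xmax, ymax) and written back out in YOLO center format
# (x_center, y_center, width, height).
def corners_to_yolo(xmin, ymin, xmax, ymax):
    width, height = xmax - xmin, ymax - ymin
    return xmin + width / 2, ymin + height / 2, width, height


x_center, y_center, width, height = corners_to_yolo(0.2, 0.2, 0.6, 0.4)
assert abs(x_center - 0.4) < 1e-9 and abs(width - 0.4) < 1e-9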
120
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]: """simple docstring""" if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." ) if tokenizer_name is None: lowerCAmelCase__ = TOKENIZER_CLASSES else: lowerCAmelCase__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + "Fast" )} logger.info(F"Loading tokenizer classes: {tokenizer_names}" ) for tokenizer_name in tokenizer_names: lowerCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name] lowerCAmelCase__ = True if checkpoint_name is None: lowerCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() ) else: lowerCAmelCase__ = [checkpoint_name] logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" ) for checkpoint in checkpoint_names: logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" ) # Load tokenizer lowerCAmelCase__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ ) # Save fast tokenizer logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" ) # For organization names we create sub-directories if "/" in checkpoint: lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" ) lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) elif add_prefix: lowerCAmelCase__ = checkpoint lowerCAmelCase__ = dump_path else: lowerCAmelCase__ = None lowerCAmelCase__ = dump_path logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: lowerCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] lowerCAmelCase__ = file_path.split(UpperCamelCase_ )[-1][0] if next_char == "/": lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = None logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" ) lowerCAmelCase__ = tokenizer.save_pretrained( UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ ) logger.info(F"=> File names {file_names}" ) for file_name in file_names: if not file_name.endswith("tokenizer.json" ): os.remove(UpperCamelCase_ ) logger.info(F"=> removing {file_name}" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will " '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) a_ = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
340
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
24
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer

from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the per-example lengths of a seq2seq dataset to its `len_file` for dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # count non-pad tokens per example on both the source and target side
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
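# Sketch of the per-example length computation above: count non-pad tokens per row.
import torch

pad_token_id = 0
batch_input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 0, 0, 0, 0]])
lens = batch_input_ids.ne(pad_token_id).sum(1).tolist()
assert lens == [3, 1]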
340
0
"""simple docstring""" from collections import defaultdict def lowerCamelCase_ (UpperCamelCase__ : int ): _UpperCAmelCase : Union[str, Any] = 1 _UpperCAmelCase : Dict = True for v in tree[start]: if v not in visited: ret += dfs(UpperCamelCase_ ) if ret % 2 == 0: cuts.append(UpperCamelCase_ ) return ret def lowerCamelCase_ (): dfs(1 ) if __name__ == "__main__": _lowerCAmelCase,_lowerCAmelCase :str = 10, 9 _lowerCAmelCase :Any = defaultdict(list) _lowerCAmelCase :Optional[int] = {} _lowerCAmelCase :Union[str, Any] = [] _lowerCAmelCase :List[str] = 0 _lowerCAmelCase :List[Any] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
263
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
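# Usage sketch against the released transformers API, which performs the same
# d_model/n_head divisibility check as the class above.
from transformers import XLNetConfig

cfg = XLNetConfig(d_model=512, n_head=8)  # d_head is derived as 512 // 8
assert cfg.d_head == 64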
340
0
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _snake_case : def __init__( self: str , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Any=13 , __lowerCamelCase: List[Any]=30 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: Union[str, Any]=3 , __lowerCamelCase: Dict=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: Optional[Any]=4 , __lowerCamelCase: Optional[Any]=37 , __lowerCamelCase: Optional[Any]="gelu" , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: Optional[int]=10 , __lowerCamelCase: str=0.02 , __lowerCamelCase: Union[str, Any]=3 , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: List[str]=2 , ) -> List[str]: __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : str = image_size __UpperCAmelCase : Tuple = patch_size __UpperCAmelCase : Dict = num_channels __UpperCAmelCase : int = is_training __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : List[str] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : List[str] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : Tuple = hidden_act __UpperCAmelCase : str = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase : List[str] = type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : str = scope __UpperCAmelCase : List[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __UpperCAmelCase : Optional[Any] = (image_size // patch_size) ** 2 __UpperCAmelCase : Any = num_patches + 2 def _lowerCamelCase ( self: int ) -> Optional[Any]: __UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : str = None if self.use_labels: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : str = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: str ) -> Optional[int]: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride 
, ) def _lowerCamelCase ( self: str , __lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Tuple ) -> str: __UpperCAmelCase : Tuple = TFDeiTModel(config=__UpperCAmelCase ) __UpperCAmelCase : Tuple = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Tuple ) -> str: __UpperCAmelCase : Optional[Any] = TFDeiTForMaskedImageModeling(config=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = model(__UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __UpperCAmelCase : Dict = 1 __UpperCAmelCase : Dict = TFDeiTForMaskedImageModeling(__UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any ) -> Union[str, Any]: __UpperCAmelCase : Tuple = self.type_sequence_label_size __UpperCAmelCase : List[str] = TFDeiTForImageClassification(__UpperCAmelCase ) __UpperCAmelCase : Dict = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Tuple = TFDeiTForImageClassification(__UpperCAmelCase ) __UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCamelCase ( self: Optional[Any] ) -> List[str]: __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _snake_case ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): lowerCamelCase__: Any = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowerCamelCase__: Dict = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowerCamelCase__: Any = False lowerCamelCase__: Tuple = False lowerCamelCase__: Any = False lowerCamelCase__: Optional[int] = False def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Optional[int] = TFDeiTModelTester(self ) __UpperCAmelCase : int = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def _lowerCamelCase ( self: Tuple ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[Any]: pass def _lowerCamelCase ( self: int ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __UpperCAmelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , tf.keras.layers.Dense ) ) def _lowerCamelCase ( self: List[Any] ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Dict = model_class(__UpperCAmelCase ) __UpperCAmelCase : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()] __UpperCAmelCase : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _lowerCamelCase ( self: Dict ) -> Tuple: __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _lowerCamelCase ( self: Dict ) -> Union[str, Any]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def _lowerCamelCase ( self: Tuple ) -> List[Any]: __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: Dict=False ) -> Optional[Any]: __UpperCAmelCase : Tuple = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _lowerCamelCase ( self: Dict ) -> int: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = TFDeiTModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _UpperCamelCase ( ) -> Optional[Any]: __UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: Any ) -> Dict: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self: Tuple ) -> List[Any]: __UpperCAmelCase : Any = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) __UpperCAmelCase : Union[str, Any] = self.default_image_processor __UpperCAmelCase : Any = prepare_img() __UpperCAmelCase : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors="tf" ) # forward pass __UpperCAmelCase : str = model(**__UpperCAmelCase ) # verify the logits __UpperCAmelCase : Optional[Any] = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __UpperCAmelCase : List[Any] = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
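# Quick check of the sequence-length bookkeeping in the tester above: DeiT
# prepends a [CLS] token and a distillation token, so with the tester's
# defaults (image_size=30, patch_size=2) the sequence length is 227.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
assert num_patches + 2 == 227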
157
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False ) -> Tuple: """simple docstring""" lowerCAmelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> List[str]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase__ = "" else: lowerCAmelCase__ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" ) lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _a ( UpperCamelCase_ : Dict ) -> Tuple: """simple docstring""" lowerCAmelCase__ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> List[Any]: """simple docstring""" lowerCAmelCase__ = dct.pop(UpperCamelCase_ ) lowerCAmelCase__ = val def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCAmelCase__ = ViTMSNConfig() lowerCAmelCase__ = 1_000 lowerCAmelCase__ = "datasets/huggingface/label-files" lowerCAmelCase__ = "imagenet-1k-id2label.json" lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ ) , "r" ) ) lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCAmelCase__ = 384 lowerCAmelCase__ = 1_536 lowerCAmelCase__ = 6 elif "l16" in checkpoint_url: lowerCAmelCase__ = 1_024 lowerCAmelCase__ = 4_096 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 lowerCAmelCase__ = 0.1 elif "b4" in checkpoint_url: lowerCAmelCase__ = 4 elif "l7" in checkpoint_url: lowerCAmelCase__ = 7 lowerCAmelCase__ = 1_024 lowerCAmelCase__ = 4_096 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 lowerCAmelCase__ = 0.1 lowerCAmelCase__ = ViTMSNModel(UpperCamelCase_ ) lowerCAmelCase__ = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="cpu" )["target_encoder"] lowerCAmelCase__ = ViTImageProcessor(size=config.image_size ) remove_projection_head(UpperCamelCase_ ) lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ , base_model=UpperCamelCase_ ) for src, dest in rename_keys: rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ , base_model=UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) lowerCAmelCase__ = ViTImageProcessor( size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ ) 
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) lowerCAmelCase__ = model(**UpperCamelCase_ ) lowerCAmelCase__ = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] ) elif "b16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] ) elif "l16" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] ) elif "b4" in checkpoint_url: lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] ) else: lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase_ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
340
0
"""Project Euler 94: sum of the perimeters of all almost equilateral Heronian
triangles (isosceles, third side off by one, integral area) whose perimeter
does not exceed max_perimeter."""


def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Side lengths of consecutive solutions follow a linear recurrence,
        # so each new perimeter is derived from the previous pair of values.
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
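A few spot checks: the first almost equilateral Heronian triangles are (5, 5, 6), (17, 17, 16) and (65, 65, 66), with perimeters 16, 50 and 196, so the partial sums follow directly:

assert solution(15) == 0
assert solution(16) == 16
assert solution(50) == 16 + 50
assert solution(196) == 16 + 50 + 196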
341
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCAmelCase = logging.get_logger(__name__) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = ["pixel_values"] def __init__(self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PIL.Image.BICUBIC , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: super().__init__(**UpperCAmelCase ) _snake_case = size if size is not None else {"""height""": 256, """width""": 256} _snake_case = get_size_dict(UpperCAmelCase ) _snake_case = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _snake_case = get_size_dict(UpperCAmelCase , param_name="""crop_size""" ) _snake_case = do_resize _snake_case = size _snake_case = resample _snake_case = do_center_crop _snake_case = crop_size _snake_case = do_rescale _snake_case = rescale_factor _snake_case = do_normalize _snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PIL.Image.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: _snake_case = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( UpperCAmelCase , size=(size["""height"""], size["""width"""]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: _snake_case = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]: return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image: _snake_case = do_resize if do_resize is not None else self.do_resize _snake_case = resample if resample is not None else self.resample _snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop _snake_case = do_rescale if do_rescale is not None else self.do_rescale _snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor _snake_case = do_normalize if do_normalize is not None else self.do_normalize _snake_case = image_mean if image_mean is not None else self.image_mean _snake_case = image_std if image_std is not None else self.image_std _snake_case = size if size is not None else self.size _snake_case = get_size_dict(UpperCAmelCase ) _snake_case = crop_size if crop_size is not None else self.crop_size _snake_case = get_size_dict(UpperCAmelCase , param_name="""crop_size""" ) _snake_case = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. _snake_case = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: _snake_case = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_center_crop: _snake_case = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images] if do_rescale: _snake_case = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: _snake_case = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] _snake_case = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] _snake_case = {"""pixel_values""": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
341
1
"""Project Euler 33: digit-cancelling fractions such as 49/98 = 4/8."""
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
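For example, 49/98 is the classic digit-cancelling fraction (dropping the nines leaves 4/8 = 1/2), and the product of the four two-digit solutions reduces to a fraction with denominator 100:

assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(30, 50)
assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]
assert solution() == 100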
341
"""Pure-Python Base64 encode/decode using the standard RFC 4648 alphabet."""

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
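A round-trip check against the standard library (the names base64_encode/base64_decode are the ones restored above):

import base64

sample = b"Hello, World!"
encoded = base64_encode(sample)
assert encoded == base64.b64encode(sample)  # b'SGVsbG8sIFdvcmxkIQ=='
assert base64_decode(encoded) == sample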
341
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowercase (self ) -> Any: _snake_case = 1 _snake_case = 3 _snake_case = (32, 32) _snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase ) return image @property def lowercase (self ) -> Dict: torch.manual_seed(0 ) _snake_case = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def lowercase (self ) -> List[str]: torch.manual_seed(0 ) _snake_case = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def lowercase (self ) -> List[Any]: torch.manual_seed(0 ) _snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) return CLIPTextModel(UpperCAmelCase ) def lowercase (self ) -> Any: _snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator _snake_case = self.dummy_cond_unet_upscale _snake_case = DDPMScheduler() _snake_case = DDIMScheduler(prediction_type="""v_prediction""" ) _snake_case = self.dummy_vae _snake_case = self.dummy_text_encoder _snake_case = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _snake_case = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _snake_case = StableDiffusionUpscalePipeline( unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , ) _snake_case = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) _snake_case = """A painting of a squirrel eating a burger""" _snake_case = torch.Generator(device=UpperCAmelCase ).manual_seed(0 ) _snake_case = sd_pipe( [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _snake_case = output.images _snake_case = torch.Generator(device=UpperCAmelCase 
).manual_seed(0 ) _snake_case = sd_pipe( [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=UpperCAmelCase , )[0] _snake_case = image[0, -3:, -3:, -1] _snake_case = image_from_tuple[0, -3:, -3:, -1] _snake_case = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _snake_case = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase (self ) -> Any: _snake_case = """cpu""" # ensure determinism for the device-dependent torch.Generator _snake_case = self.dummy_cond_unet_upscale _snake_case = DDPMScheduler() _snake_case = DDIMScheduler(prediction_type="""v_prediction""" ) _snake_case = self.dummy_vae _snake_case = self.dummy_text_encoder _snake_case = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _snake_case = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _snake_case = StableDiffusionUpscalePipeline( unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , ) _snake_case = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) _snake_case = """A painting of a squirrel eating a burger""" _snake_case = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _snake_case = output.images assert image.shape[0] == 2 _snake_case = torch.Generator(device=UpperCAmelCase ).manual_seed(0 ) _snake_case = sd_pipe( [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _snake_case = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def lowercase (self ) -> Optional[Any]: _snake_case = self.dummy_cond_unet_upscale _snake_case = DDPMScheduler() _snake_case = DDIMScheduler(prediction_type="""v_prediction""" ) _snake_case = self.dummy_vae _snake_case = self.dummy_text_encoder _snake_case = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _snake_case = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _snake_case = unet.half() _snake_case = text_encoder.half() # make sure here that pndm scheduler skips prk _snake_case = StableDiffusionUpscalePipeline( unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , max_noise_level=350 , ) _snake_case = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) _snake_case = """A painting of a squirrel eating a burger""" _snake_case = torch.manual_seed(0 ) _snake_case = sd_pipe( [prompt] , image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _snake_case = low_res_image.size[0] * 4 
assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase (self ) -> str: _snake_case = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _snake_case = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _snake_case = """stabilityai/stable-diffusion-x4-upscaler""" _snake_case = StableDiffusionUpscalePipeline.from_pretrained(UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() _snake_case = """a cat sitting on a park bench""" _snake_case = torch.manual_seed(0 ) _snake_case = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , output_type="""np""" , ) _snake_case = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-3 def lowercase (self ) -> Optional[int]: _snake_case = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _snake_case = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _snake_case = """stabilityai/stable-diffusion-x4-upscaler""" _snake_case = StableDiffusionUpscalePipeline.from_pretrained( UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() _snake_case = """a cat sitting on a park bench""" _snake_case = torch.manual_seed(0 ) _snake_case = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , output_type="""np""" , ) _snake_case = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def lowercase (self ) -> str: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _snake_case = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _snake_case = """stabilityai/stable-diffusion-x4-upscaler""" _snake_case = StableDiffusionUpscalePipeline.from_pretrained( UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _snake_case = """a cat sitting on a park bench""" _snake_case = torch.manual_seed(0 ) _snake_case = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , ) _snake_case = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount a series of per-period cash flows back to today's value."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
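Worked example: an outlay of 100 followed by two inflows of 60, discounted at 10%, is worth -100 + 60/1.1 + 60/1.21 ≈ 4.13 today:

print(present_value(0.10, [-100, 60, 60]))  # 4.13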
341
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so that it only advances when the wrapped
    optimizer(s) actually performed a step (e.g. not on gradient-overflow skips)."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler's API
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
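A minimal usage sketch, assuming accelerate's Accelerator has been constructed (which initializes the AcceleratorState/GradientState singletons this wrapper reads); in practice accelerator.prepare builds the wrapper for you:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
base_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

model, optimizer, scheduler = accelerator.prepare(model, optimizer, base_scheduler)
scheduler.step()  # only advances the LR if the optimizer actually stepped
print(scheduler.get_last_lr())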
341
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
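The _LazyModule indirection keeps the package import cheap: submodules listed in _import_structure are only imported when one of their attributes is first accessed. A minimal sketch:

import transformers

# No M2M100 code has been imported yet; the attribute access below
# triggers the lazy import of configuration_m2m_100.
config = transformers.M2M100Config()
print(config.model_type)  # "m2m_100"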
341
1
"""Breadth-first search with parent tracking, used to reconstruct shortest paths
in an unweighted graph."""
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to target_vertex as 'G->C->A->...'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no path from G to Foo
341
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
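A small illustration of how these aliases compose; read_json here is a hypothetical helper, not part of the module:

import json


def read_json(path: PathLike) -> NestedDataStructureLike[dict]:
    # `path` may be a str, bytes or os.PathLike; the parsed JSON may be a
    # single dict, a list of dicts, or a mapping of str to dict.
    with open(path, encoding="utf-8") as f:
        return json.load(f)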
341
1
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")

    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"

    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
341
"""A doubly linked list with head/tail pointers and an iterator."""


class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
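A short usage sketch (method names as restored above):

ll = LinkedList()
for value in (1, 2, 3):
    ll.insert(value)           # appends at the tail
print(ll)                      # 1 2 3
ll.insert_at_position(2, 99)   # 1 99 2 3
assert 99 in ll
ll.delete_value(99)
print(ll.get_head_data(), ll.get_tail_data())  # 1 3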
341
1
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __lowerCAmelCase = 50_003 __lowerCAmelCase = 50_002 @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = PLBartTokenizer lowerCAmelCase_ = None lowerCAmelCase_ = False def lowercase (self ) -> str: super().setUp() # We have a SentencePiece fixture for testing _snake_case = PLBartTokenizer(UpperCAmelCase , language_codes="""base""" , keep_accents=UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase (self ) -> Union[str, Any]: _snake_case = PLBartTokenizer(UpperCAmelCase , language_codes="""base""" , keep_accents=UpperCAmelCase ) _snake_case = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _snake_case = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _snake_case = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) self.assertListEqual( UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _snake_case = tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) _snake_case = tokenizer.vocab_size _snake_case = [tokenizer.convert_ids_to_tokens(UpperCAmelCase ) for x in range(end - 4 , UpperCAmelCase )] self.assertListEqual(UpperCAmelCase , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) _snake_case = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" _snake_case = tokenizer(UpperCAmelCase ).input_ids self.assertEqual( tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) , UpperCAmelCase , ) def lowercase (self ) -> List[Any]: _snake_case = PLBartTokenizer(UpperCAmelCase , language_codes="""multi""" , keep_accents=UpperCAmelCase ) _snake_case = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( 
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _snake_case = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _snake_case = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) self.assertListEqual( UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _snake_case = tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) _snake_case = tokenizer.vocab_size _snake_case = [tokenizer.convert_ids_to_tokens(UpperCAmelCase ) for x in range(end - 7 , UpperCAmelCase )] self.assertListEqual( UpperCAmelCase , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) _snake_case = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" _snake_case = tokenizer(UpperCAmelCase ).input_ids self.assertEqual( tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) , UpperCAmelCase , ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = "uclanlp/plbart-python-en_XX" lowerCAmelCase_ = [ "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])", "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])", ] lowerCAmelCase_ = [ "Returns the maximum value of a b c.", "Sums the values of a b c.", ] lowerCAmelCase_ = [ 1_34, 54_52, 3_34_60, 3_34_41, 3_34_63, 3_34_65, 3_34_63, 3_34_49, 9_88, 20, 3_34_56, 19, 3_34_56, 7_71, 39, 42_58, 8_89, 33_18, 3_34_41, 3_34_63, 3_34_65, 3_34_63, 3_34_49, 24_71, 2, PYTHON_CODE, ] @classmethod def lowercase (cls ) -> Optional[int]: _snake_case = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) _snake_case = 1 return cls def lowercase (self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50003 ) def lowercase (self ) -> Union[str, Any]: _snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase ) def lowercase (self ) -> Tuple: self.assertIn(UpperCAmelCase , self.tokenizer.all_special_ids ) _snake_case = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2] _snake_case = self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) _snake_case = 
self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase ) self.assertEqual(UpperCAmelCase , UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase ) def lowercase (self ) -> Union[str, Any]: _snake_case = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , UpperCAmelCase ) _snake_case = 10 _snake_case = self.tokenizer(UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) def lowercase (self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50004, 50001] ) def lowercase (self ) -> Optional[Any]: _snake_case = tempfile.mkdtemp() _snake_case = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCAmelCase ) _snake_case = PLBartTokenizer.from_pretrained(UpperCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase ) @require_torch def lowercase (self ) -> Dict: _snake_case = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase , return_tensors="""pt""" ) _snake_case = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , UpperCAmelCase ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def lowercase (self ) -> int: _snake_case = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) _snake_case = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) _snake_case = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def lowercase (self ) -> Optional[Any]: _snake_case = self.tokenizer(self.src_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=3 , return_tensors="""pt""" ) _snake_case = self.tokenizer( text_target=self.tgt_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=10 , return_tensors="""pt""" ) _snake_case = targets["""input_ids"""] _snake_case = shift_tokens_right(UpperCAmelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowercase (self ) -> Tuple: _snake_case = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(UpperCAmelCase ) , { # A, test, EOS, en_XX """input_ids""": [[150, 242, 2, 50003]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 50001, } , )
341
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput __lowerCAmelCase = 8 def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): _snake_case = x.device _snake_case = (x * 255).int().clamp(0 , 255 ) _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c h w -> b c 1 h w""" ) _snake_case = ((x & mask) != 0).float() _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c d h w -> b (c d) h w""" ) _snake_case = bits * 2 - 1 return bits def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): _snake_case = x.device _snake_case = (x > 0).int() _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b (c d) h w -> b c d h w""" , d=8 ) _snake_case = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" ) return (dec / 255).clamp(0.0 , 1.0 ) def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _snake_case = self.alphas_cumprod[timestep] _snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _snake_case = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _snake_case = self.bit_scale if self.config.clip_sample: _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _snake_case = model_output.device if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else """cpu""" _snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ** 0.5 * eta * noise _snake_case = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="epsilon" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): _snake_case = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _snake_case, _snake_case = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 ) else: _snake_case = None # 1. compute alphas, betas _snake_case = self.alphas_cumprod[t] _snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one _snake_case = 1 - alpha_prod_t _snake_case = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _snake_case = model_output else: raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" _snake_case = self.bit_scale if self.config.clip_sample: _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _snake_case = 0 if t > 0: _snake_case = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_SCREAMING_SNAKE_CASE ).to(model_output.device ) _snake_case = (self._get_variance(_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise _snake_case = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , ) -> Tuple: super().__init__() _snake_case = bit_scale _snake_case = ( ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step ) self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__(self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]: _snake_case = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , ) _snake_case = decimal_to_bits(UpperCAmelCase ) * self.bit_scale _snake_case = latents.to(self.device ) self.scheduler.set_timesteps(UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _snake_case = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _snake_case = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample _snake_case = bits_to_decimal(UpperCAmelCase ) if output_type == "pil": _snake_case = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
341
1
"""A doubly linked list with an explicit iterator class."""


class Node:
    def __init__(self, data: int, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head) -> None:
        self.current = head

    def __iter__(self):
        return self

    def __next__(self) -> int:
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self) -> LinkedListIterator:
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value: int) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        # splice the node out, then clear its own links
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.previous = None
        node.next = None

    def is_empty(self) -> bool:
        return self.head is None


def test_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
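A short usage sketch for the list above (the values in comments are what each call leaves in the list):

linked_list = LinkedList()
linked_list.insert(10)                 # 10
linked_list.insert(20)                 # 10 20
linked_list.insert_at_position(1, 5)   # 5 10 20
print(linked_list)                     # "5 10 20"
print(20 in linked_list)               # True
linked_list.delete_value(10)           # 5 20
print(linked_list.get_head_data(), linked_list.get_tail_data())  # 5 20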
341
"""Sum the perimeters generated by a linear recurrence up to a bound."""


def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # advance the recurrence that generates the next side length
        prev_value += 2 * value
        value += prev_value
        # alternate between the "+2" and "-2" families of perimeters
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
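A quick way to probe how fast the summed perimeters grow before running the full 10**9 bound (the bounds here are arbitrary illustration values):

for bound in (10**2, 10**4, 10**6):
    print(f"solution({bound}) = {solution(bound)}")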
341
1
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor __lowerCAmelCase = logging.getLogger(__name__) __lowerCAmelCase = 50 # max width of layer names __lowerCAmelCase = 70 # max width of quantizer names def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = parser.add_argument_group("""quant_trainer arguments""" ) group.add_argument("""--wprec""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""weight precision""" ) group.add_argument("""--aprec""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""activation precision""" ) group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" ) group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" ) group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" ) group.add_argument("""--quant-disable-keyword""" , type=_SCREAMING_SNAKE_CASE , nargs="""+""" , help="""disable quantizers by keyword""" ) group.add_argument("""--quant-disable-layer-module""" , type=_SCREAMING_SNAKE_CASE , help="""disable quantizers by keyword under layer.""" ) group.add_argument("""--quant-enable-layer-module""" , type=_SCREAMING_SNAKE_CASE , help="""enable quantizers by keyword under layer""" ) group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" ) group.add_argument("""--percentile""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help="""percentile for PercentileCalibrator""" ) group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" ) group.add_argument("""--clip-gelu""" , metavar="""N""" , type=_SCREAMING_SNAKE_CASE , help="""clip gelu output maximum value to N""" ) group.add_argument( """--recalibrate-weights""" , action="""store_true""" , help=( """recalibrate weight amaxes by taking the max of the weights.""" """ amaxes will be computed with the current quantization granularity (axis).""" ) , ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): if args.calibrator == "max": _snake_case = """max""" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("""Specify --percentile when using percentile calibrator""" ) _snake_case = """histogram""" elif args.calibrator == "mse": _snake_case = """histogram""" else: raise ValueError(f"""Invalid calibrator {args.calibrator}""" ) _snake_case = QuantDescriptor(num_bits=args.aprec , calib_method=_SCREAMING_SNAKE_CASE ) _snake_case = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_SCREAMING_SNAKE_CASE ) quant_nn.QuantLinear.set_default_quant_desc_weight(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ): logger.info("""Configuring Model for Quantization""" ) logger.info(f"""using quantization package {pytorch_quantization.__file__}""" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_SCREAMING_SNAKE_CASE , ["""embeddings"""] , which="""weight""" , _disabled=_SCREAMING_SNAKE_CASE ) if args.quant_disable: set_quantizer_by_name(_SCREAMING_SNAKE_CASE , [""""""] , _disabled=_SCREAMING_SNAKE_CASE ) if 
args.quant_disable_keyword: set_quantizer_by_name(_SCREAMING_SNAKE_CASE , args.quant_disable_keyword , _disabled=_SCREAMING_SNAKE_CASE ) if args.quant_disable_layer_module: set_quantizer_by_name(_SCREAMING_SNAKE_CASE , [R"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=_SCREAMING_SNAKE_CASE ) if args.quant_enable_layer_module: set_quantizer_by_name(_SCREAMING_SNAKE_CASE , [R"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=_SCREAMING_SNAKE_CASE ) if args.recalibrate_weights: recalibrate_weights(_SCREAMING_SNAKE_CASE ) if args.fuse_qkv: fuse_qkv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if args.clip_gelu: clip_gelu(_SCREAMING_SNAKE_CASE , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): logger.info("""Enabling Calibration""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f"""{name:80}: {module}""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): logger.info("""Loading calibrated amax""" ) for name, module in model.named_modules(): if name.endswith("""_quantizer""" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("""percentile""" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): def fusea(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for mod in [qq, qk, qv]: if not hasattr(_SCREAMING_SNAKE_CASE , """_amax""" ): print(""" WARNING: NO AMAX BUFFER""" ) return _snake_case = qq._amax.detach().item() _snake_case = qk._amax.detach().item() _snake_case = qv._amax.detach().item() _snake_case = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) qq._amax.fill_(_SCREAMING_SNAKE_CASE ) qk._amax.fill_(_SCREAMING_SNAKE_CASE ) qv._amax.fill_(_SCREAMING_SNAKE_CASE ) logger.info(f""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" ) for name, mod in model.named_modules(): if name.endswith(""".attention.self""" ): logger.info(f"""FUSE_QKV: {name:{name_width}}""" ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for name, mod in model.named_modules(): if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ): _snake_case = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_SCREAMING_SNAKE_CASE ) _snake_case = mod._input_quantizer._amax.data.detach().item() logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): for name, mod in model.named_modules(): if hasattr(_SCREAMING_SNAKE_CASE , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None: _snake_case = mod.weight.shape[0] _snake_case = mod._weight_quantizer._amax.detach() _snake_case = torch.ones(_SCREAMING_SNAKE_CASE , dtype=amax.dtype , device=amax.device ) * amax 
print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): for name, mod in model.named_modules(): if hasattr(_SCREAMING_SNAKE_CASE , """_weight_quantizer""" ): if not hasattr(mod.weight_quantizer , """_amax""" ): print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _snake_case = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _snake_case = set(range(len(mod.weight.size() ) ) ) - axis_set _snake_case = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_SCREAMING_SNAKE_CASE , keepdims=_SCREAMING_SNAKE_CASE ).detach() logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" ) _snake_case = amax def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=180 , _SCREAMING_SNAKE_CASE=None ): if ignore is None: _snake_case = [] elif not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = [ignore] _snake_case = 0 for name, mod in model.named_modules(): if not hasattr(_SCREAMING_SNAKE_CASE , """weight""" ): continue _snake_case = max(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ) for name, mod in model.named_modules(): _snake_case = getattr(_SCREAMING_SNAKE_CASE , """_input_quantizer""" , _SCREAMING_SNAKE_CASE ) _snake_case = getattr(_SCREAMING_SNAKE_CASE , """_weight_quantizer""" , _SCREAMING_SNAKE_CASE ) if not hasattr(_SCREAMING_SNAKE_CASE , """weight""" ): continue if type(_SCREAMING_SNAKE_CASE ) in ignore: continue if [True for s in ignore if type(_SCREAMING_SNAKE_CASE ) is str and s in name]: continue _snake_case = f"""Act:{input_q.extra_repr()}""" _snake_case = f"""Wgt:{weight_q.extra_repr()}""" _snake_case = f"""{name:{name_width}} {act_str} {wgt_str}""" if len(_SCREAMING_SNAKE_CASE ) <= line_width: logger.info(_SCREAMING_SNAKE_CASE ) else: logger.info(f"""{name:{name_width}} {act_str}""" ) logger.info(f"""{" ":{name_width}} {wgt_str}""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = 0 for name, mod in model.named_modules(): if isinstance(_SCREAMING_SNAKE_CASE , pytorch_quantization.nn.TensorQuantizer ): print(f"""{name:80} {mod}""" ) count += 1 print(f"""{count} TensorQuantizers found in model""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if quantizer_mod is not None: assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: logger.warning(f"""{name} has no {quantizer}""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="both" , **_SCREAMING_SNAKE_CASE ): _snake_case = f"""Warning: changing {which} quantizers of {name:{qname_width}}""" for k, v in kwargs.items(): s += f""" {k}={v}""" if which in ["input", "both"]: set_quantizer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """_input_quantizer""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if which in ["weight", "both"]: set_quantizer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """_weight_quantizer""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE 
( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): for name, mod in model.named_modules(): if hasattr(_SCREAMING_SNAKE_CASE , """_input_quantizer""" ) or hasattr(_SCREAMING_SNAKE_CASE , """_weight_quantizer""" ): for n in names: if re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): set_quantizers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) elif name.endswith("""_quantizer""" ): for n in names: if re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = f"""Warning: changing {name:{name_width}}""" for k, v in kwargs.items(): s += f""" {k}={v}""" setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) logger.info(_SCREAMING_SNAKE_CASE )
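The module above follows pytorch_quantization's calibrate-then-quantize flow. A minimal standalone sketch of that flow, using only the TensorQuantizer methods that appear above; `model` and `calib_loader` are placeholders, and dict-style batches are an assumption:

# Hypothetical calibration loop; model/loader names are placeholders.
import torch

def calibrate(model, calib_loader, num_batches=4):
    # switch every quantizer from quantizing to collecting statistics
    for name, module in model.named_modules():
        if name.endswith("_quantizer") and module._calibrator is not None:
            module.disable_quant()
            module.enable_calib()
    with torch.no_grad():
        for i, batch in enumerate(calib_loader):
            if i >= num_batches:
                break
            model(**batch)  # assumes dict batches
    # load the collected amax values and re-enable quantization
    for name, module in model.named_modules():
        if name.endswith("_quantizer") and module._calibrator is not None:
            module.load_calib_amax()  # histogram calibrators take a method arg, as above
            module.enable_quant()
            module.disable_calib()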
341
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
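A small usage sketch for the configuration class above, assuming the standard transformers exports for this model family:

# Hypothetical usage; a smaller-than-default geometry is chosen for illustration.
from transformers import DebertaV2Config, DebertaV2Model

config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = DebertaV2Model(config)
print(config.pos_att_type, config.pooler_hidden_size)  # None 768 (pooler size defaults to hidden_size)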
341
1
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=224 , UpperCAmelCase=1000 , UpperCAmelCase=[3, 3, 6, 4] , UpperCAmelCase=[48, 56, 112, 220] , ) -> Union[str, Any]: _snake_case = parent _snake_case = batch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = num_labels _snake_case = image_size _snake_case = layer_depths _snake_case = embed_dims def lowercase (self ) -> Any: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase (self ) -> int: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase , layer_scale_init_value=1e-5 , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: _snake_case = SwiftFormerModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: _snake_case = self.num_labels _snake_case = SwiftFormerForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) _snake_case = SwiftFormerForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase (self ) -> Dict: ((_snake_case), (_snake_case), (_snake_case)) = self.prepare_config_and_inputs() _snake_case = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () 
lowerCAmelCase_ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> Union[str, Any]: _snake_case = SwiftFormerModelTester(self ) _snake_case = ConfigTester( self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def lowercase (self ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def lowercase (self ) -> List[str]: pass def lowercase (self ) -> Any: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) ) def lowercase (self ) -> int: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def lowercase (self ) -> Union[str, Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def lowercase (self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase ) @slow def lowercase (self ) -> List[str]: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = SwiftFormerModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def lowercase (self ) -> Optional[Any]: pass def lowercase (self ) -> Optional[int]: def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _snake_case = outputs.hidden_states _snake_case = 8 self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(UpperCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Dict: def _config_zero_init(UpperCAmelCase ): _snake_case = copy.deepcopy(UpperCAmelCase ) for key in 
configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(UpperCAmelCase , UpperCAmelCase , 1e-1_0 ) if isinstance(getattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ): _snake_case = _config_zero_init(getattr(UpperCAmelCase , UpperCAmelCase ) ) setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return configs_no_init _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = _config_zero_init(UpperCAmelCase ) for model_class in self.all_model_classes: _snake_case = model_class(config=UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase (self ) -> Any: pass def __SCREAMING_SNAKE_CASE ( ): _snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase (self ) -> Dict: return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def lowercase (self ) -> Any: _snake_case = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(UpperCAmelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) # verify the logits _snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
341
"""Convert between Roman numerals and integers."""

ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtraction (e.g. IV = 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
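A round-trip usage sketch for the two converters above:

for value in (14, 90, 2021, 3999):
    numeral = int_to_roman(value)
    assert roman_to_int(numeral) == value
    print(value, numeral)  # e.g. 14 XIV, 90 XC, 2021 MMXXI, 3999 MMMCMXCIX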
341
1
'''simple docstring''' from scipy.stats import spearmanr import datasets __lowerCAmelCase = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' __lowerCAmelCase = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' __lowerCAmelCase = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowercase (self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Optional[Any]: _snake_case = spearmanr(UpperCAmelCase , UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
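The metric above delegates directly to scipy; the equivalent bare call, mirroring the example values from the metric's own docstring:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2), round(pvalue, 2))  # -0.7 0.19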
341
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
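The lazy-module pattern above defers heavy submodule imports until first attribute access. A standalone sketch of the same idea, not the transformers implementation (names here are illustrative):

# Minimal illustration of the lazy-import idea used above.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        # import the submodule only now, on first access
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, attr)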
341
1
"""Generate RSA public/private key files."""

import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
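A usage sketch for the key generator above, run from within the package so the relative imports resolve. The tiny key size is for illustration only and is never secure:

# Illustration only: 128-bit keys are trivially breakable.
public_key, private_key = generate_key(128)
n, e = public_key
_, d = private_key
message = 42  # a toy "message" smaller than n
ciphertext = pow(message, e, n)
assert pow(ciphertext, d, n) == message  # decryption inverts encryption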
341
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __lowerCAmelCase = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ): if attention_mask is None: _snake_case = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _snake_case = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _snake_case = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=99 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=0.02 , ) -> Union[str, Any]: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = eos_token_id _snake_case = pad_token_id _snake_case = bos_token_id _snake_case = initializer_range def lowercase (self ) -> str: _snake_case = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _snake_case = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 ) _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase , ) _snake_case = prepare_blenderbot_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def lowercase (self ) -> Dict: _snake_case, _snake_case = self.prepare_config_and_inputs() return config, inputs_dict def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: _snake_case = 20 _snake_case = model_class_name(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] ) _snake_case, _snake_case = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) _snake_case = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _snake_case = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _snake_case = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , ) _snake_case = model.decode(UpperCAmelCase , UpperCAmelCase ) _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: _snake_case = 20 _snake_case = model_class_name(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] ) _snake_case, _snake_case = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _snake_case = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) _snake_case = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _snake_case = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase ) _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = 99 def lowercase (self ) -> Any: _snake_case = np.array( [ 
[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _snake_case = input_ids.shape[0] _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase (self ) -> Optional[Any]: _snake_case, _snake_case, _snake_case = self._get_config_and_data() _snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) _snake_case = lm_model(input_ids=UpperCAmelCase ) _snake_case = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase ) def lowercase (self ) -> int: _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) _snake_case = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _snake_case = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _snake_case = lm_model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase ) _snake_case = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase ) def lowercase (self ) -> Tuple: _snake_case = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 ) _snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() _snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(UpperCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _lowerCAmelCase ( __snake_case , unittest.TestCase , __snake_case ): '''simple docstring''' lowerCAmelCase_ = True lowerCAmelCase_ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowercase (self ) -> Any: _snake_case = FlaxBlenderbotModelTester(self ) def lowercase (self ) -> str: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Dict: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) _snake_case = 
model_class(UpperCAmelCase ) @jax.jit def encode_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase ) with self.subTest("""JIT Enabled""" ): _snake_case = encode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _snake_case = encode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase (self ) -> str: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case = model_class(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _snake_case = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): return model.decode( decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , ) with self.subTest("""JIT Enabled""" ): _snake_case = decode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _snake_case = decode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase (self ) -> Any: for model_class_name in self.all_model_classes: _snake_case = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _snake_case = np.ones((1, 1) ) * model.config.eos_token_id _snake_case = model(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" ) @slow def lowercase (self ) -> Dict: _snake_case = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25} _snake_case = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} _snake_case = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=UpperCAmelCase ) _snake_case = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" ) _snake_case = ["""Sam"""] _snake_case = tokenizer(UpperCAmelCase , return_tensors="""jax""" ) _snake_case = model.generate(**UpperCAmelCase , **UpperCAmelCase ) _snake_case = """Sam is a great name. It means \"sun\" in Gaelic.""" _snake_case = tokenizer.batch_decode(UpperCAmelCase , **UpperCAmelCase ) assert generated_txt[0].strip() == tgt_text
341
1
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _snake_case = flax_key_tuple[:-1] + ("""weight""",) _snake_case = torch.permute(_SCREAMING_SNAKE_CASE , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_SCREAMING_SNAKE_CASE ): # linear layer _snake_case = flax_key_tuple[:-1] + ("""weight""",) _snake_case = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if "metadata" in layer: _snake_case = layer.split("""metadata""" ) _snake_case = """""".join(split_layer[0] )[:-1] _snake_case = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: _snake_case = layer.split("""kvstore""" ) _snake_case = """""".join(split_layer[0] )[:-1] _snake_case = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: _snake_case = layer.split("""/""" ) _snake_case = """/""".join(split_layer[:-1] ) _snake_case = (split_layer[-1],) if "kvstore/path" in layer: _snake_case = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: _snake_case = """file""" else: _snake_case = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = rename_keys(_SCREAMING_SNAKE_CASE ) _snake_case = {} for k, v in current_block.items(): _snake_case = v _snake_case = new_current_block torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = WEIGHTS_NAME ): _snake_case = convert_file_size_to_int(_SCREAMING_SNAKE_CASE ) _snake_case = [] _snake_case = {} _snake_case = 0 _snake_case = 0 os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: _snake_case = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] _snake_case = flatten_dict(_SCREAMING_SNAKE_CASE , sep="""/""" ) _snake_case = {} for layer in checkpoint_info.keys(): _snake_case, _snake_case, _snake_case = get_key_and_tensorstore_dict( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if curr_real_layer_name in all_layers: _snake_case = content else: _snake_case = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _snake_case = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() _snake_case = torch.tensor(_SCREAMING_SNAKE_CASE ) _snake_case = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts _snake_case, _snake_case = rename_base_flax_keys(tuple(key.split("""/""" ) ) , 
_SCREAMING_SNAKE_CASE ) _snake_case = """/""".join(_SCREAMING_SNAKE_CASE ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: _snake_case = os.path.join( _SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) ) rename_and_save_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) sharded_state_dicts.append(current_block.keys() ) del current_block _snake_case = {} _snake_case = 0 _snake_case = raw_weights.to(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) current_block_size += weight_size total_size += weight_size # Add the last block _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) ) rename_and_save_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(_SCREAMING_SNAKE_CASE ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _snake_case = {} _snake_case = {} for idx, shard in enumerate(_SCREAMING_SNAKE_CASE ): _snake_case = weights_name.replace( """.bin""" , f"""-{idx+1:05d}-of-{len(_SCREAMING_SNAKE_CASE ):05d}.bin""" ) # len(sharded_state_dicts):05d} _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _snake_case = shard for key in shard: _snake_case = shard_file # Add the metadata _snake_case = {"""total_size""": total_size} _snake_case = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" , encoding="""utf-8""" ) as f: _snake_case = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + """\n""" f.write(_SCREAMING_SNAKE_CASE ) return metadata, index if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. 
Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) __lowerCAmelCase = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __SCREAMING_SNAKE_CASE ( ): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _snake_case = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) _snake_case = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) _snake_case = TaTokenizer.from_pretrained("""t5-small""" ) _snake_case = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" _snake_case = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_ids _snake_case = model.generate(_SCREAMING_SNAKE_CASE , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
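The conversion above shards on the fly: it accumulates tensors until the next one would push the current shard past max_shard_size, then flushes and starts a new shard. The same bookkeeping in isolation (function and variable names are illustrative, not from the script):

# Standalone sketch of the size-based sharding bookkeeping used above.
def split_into_shards(named_sizes, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, size in named_sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current)  # flush the full shard
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)  # last partial shard
    return shards

print(split_into_shards([("a", 4), ("b", 4), ("c", 6)], 8))  # [['a', 'b'], ['c']]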
341
'''simple docstring'''
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class _lowerCAmelCase :
    '''simple docstring'''

    def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=10 , UpperCAmelCase=3 , UpperCAmelCase=32 * 4 , UpperCAmelCase=32 * 6 , UpperCAmelCase=4 , UpperCAmelCase=32 , ) -> Optional[Any]:
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = is_training
        _snake_case = use_auxiliary_loss
        _snake_case = num_queries
        _snake_case = num_channels
        _snake_case = min_size
        _snake_case = max_size
        _snake_case = num_labels
        _snake_case = mask_feature_size

    def lowercase (self ) -> str:
        _snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(UpperCAmelCase )
        _snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase )
        _snake_case = (torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase ) > 0.5).float()
        _snake_case = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase ) > 0.5).long()
        _snake_case = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowercase (self ) -> Tuple:
        return MaskFormerConfig.from_backbone_and_decoder_configs(backbone_config=SwinConfig(depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def lowercase (self ) -> Optional[Any]:
        _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.prepare_config_and_inputs()
        _snake_case = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict

    def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int:
        _snake_case = output.encoder_hidden_states
        _snake_case = output.pixel_decoder_hidden_states
        _snake_case = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(UpperCAmelCase ) , config.decoder_config.decoder_layers )

    def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
        with torch.no_grad():
            _snake_case = MaskFormerModel(config=UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.eval()
            _snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
            _snake_case = model(UpperCAmelCase , output_hidden_states=UpperCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(UpperCAmelCase , UpperCAmelCase )

    def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
        _snake_case = MaskFormerForInstanceSegmentation(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()

        def comm_check_on_output(UpperCAmelCase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
            _snake_case = model(UpperCAmelCase )
            comm_check_on_output(UpperCAmelCase )
            _snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
        comm_check_on_output(UpperCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    lowerCAmelCase_ = ({"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {})
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def lowercase (self ) -> int:
        _snake_case = MaskFormerModelTester(self )
        _snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )

    def lowercase (self ) -> int:
        self.config_tester.run_common_tests()

    def lowercase (self ) -> List[Any]:
        _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )

    def lowercase (self ) -> Any:
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase )

    @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
    def lowercase (self ) -> Optional[Any]:
        pass

    @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
    def lowercase (self ) -> Optional[int]:
        pass

    @unittest.skip(reason="""MaskFormer is not a generative model""" )
    def lowercase (self ) -> int:
        pass

    @unittest.skip(reason="""MaskFormer does not use token embeddings""" )
    def lowercase (self ) -> Optional[int]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def lowercase (self ) -> Optional[Any]:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowercase (self ) -> Tuple:
        pass

    def lowercase (self ) -> List[str]:
        _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = model_class(UpperCAmelCase )
            _snake_case = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case = [*signature.parameters.keys()]
            _snake_case = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCAmelCase )

    @slow
    def lowercase (self ) -> int:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _snake_case = MaskFormerModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )

    def lowercase (self ) -> Tuple:
        _snake_case = (self.model_tester.min_size,) * 2
        _snake_case = {
            """pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase ),
            """mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase ),
            """class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase ).long(),
        }
        _snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase )
        _snake_case = model(**UpperCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def lowercase (self ) -> Dict:
        _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )

    def lowercase (self ) -> List[str]:
        _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = model_class(UpperCAmelCase ).to(UpperCAmelCase )
            _snake_case = model(**UpperCAmelCase , output_attentions=UpperCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def lowercase (self ) -> Tuple:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        _snake_case = self.all_model_classes[1]
        _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
        _snake_case = model_class(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.train()
        _snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ).loss
        loss.backward()

    def lowercase (self ) -> List[str]:
        # only MaskFormerForInstanceSegmentation has the loss
        _snake_case = self.all_model_classes[1]
        _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
        _snake_case = True
        _snake_case = True
        _snake_case = model_class(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.train()
        _snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
        _snake_case = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        _snake_case = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        _snake_case = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        _snake_case = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=UpperCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


__lowerCAmelCase = 1E-4


def __SCREAMING_SNAKE_CASE ( ):
    _snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_vision
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def lowercase (self ) -> Optional[int]:
        return (MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None)

    def lowercase (self ) -> str:
        _snake_case = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase )
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
        _snake_case = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            _snake_case = model(**UpperCAmelCase )
        _snake_case = torch.tensor([[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
        _snake_case = torch.tensor([[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
        _snake_case = torch.tensor([[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )

    def lowercase (self ) -> List[str]:
        _snake_case = (MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase ).eval())
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
        _snake_case = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            _snake_case = model(**UpperCAmelCase )
        # masks_queries_logits
        _snake_case = outputs.masks_queries_logits
        self.assertEqual(masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _snake_case = [[-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942]]
        _snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
        # class_queries_logits
        _snake_case = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _snake_case = torch.tensor([[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0], [3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0], [1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0]] ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )

    def lowercase (self ) -> List[Any]:
        _snake_case = (MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ).to(UpperCAmelCase ).eval())
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
        _snake_case = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            _snake_case = model(**UpperCAmelCase )
        # masks_queries_logits
        _snake_case = outputs.masks_queries_logits
        self.assertEqual(masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _snake_case = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        _snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
        # class_queries_logits
        _snake_case = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _snake_case = torch.tensor([[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )

    def lowercase (self ) -> Tuple:
        _snake_case = (MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase ).eval())
        _snake_case = self.default_image_processor
        _snake_case = image_processor([np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
        _snake_case = inputs["""pixel_values"""].to(UpperCAmelCase )
        _snake_case = [el.to(UpperCAmelCase ) for el in inputs["""mask_labels"""]]
        _snake_case = [el.to(UpperCAmelCase ) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            _snake_case = model(**UpperCAmelCase )
        self.assertTrue(outputs.loss is not None )
341
1
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class _lowerCAmelCase :
    '''simple docstring'''

    lowerCAmelCase_ = LEDConfig
    lowerCAmelCase_ = {}
    lowerCAmelCase_ = "gelu"

    def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=20 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=4 , ) -> Any:
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = seq_length
        _snake_case = is_training
        _snake_case = use_labels
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = eos_token_id
        _snake_case = pad_token_id
        _snake_case = bos_token_id
        _snake_case = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        _snake_case = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        _snake_case = (self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window)

    def lowercase (self ) -> Tuple:
        _snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
        _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case = self.config_cls(vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        _snake_case = prepare_led_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        _snake_case = tf.concat([tf.zeros_like(UpperCAmelCase )[:, :-1], tf.ones_like(UpperCAmelCase )[:, -1:]] , axis=-1 , )
        _snake_case = global_attention_mask
        return config, inputs_dict

    def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
        _snake_case = TFLEDModel(config=UpperCAmelCase ).get_decoder()
        _snake_case = inputs_dict["""input_ids"""]
        _snake_case = input_ids[:1, :]
        _snake_case = inputs_dict["""attention_mask"""][:1, :]
        _snake_case = 1
        # first forward pass
        _snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase )
        _snake_case, _snake_case = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
        _snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
        _snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        _snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
        _snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        _snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _snake_case = output_from_no_past[:, -3:, random_slice_idx]
        _snake_case = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 )


def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ):
    if attention_mask is None:
        _snake_case = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        _snake_case = tf.concat([tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , )
    if head_mask is None:
        _snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        _snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {"input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, }


@require_tf
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    lowerCAmelCase_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    lowerCAmelCase_ = ({"conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {})
    lowerCAmelCase_ = True
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def lowercase (self ) -> Optional[Any]:
        _snake_case = TFLEDModelTester(self )
        _snake_case = ConfigTester(self , config_class=UpperCAmelCase )

    def lowercase (self ) -> Tuple:
        self.config_tester.run_common_tests()

    def lowercase (self ) -> Optional[int]:
        _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase )

    def lowercase (self ) -> str:
        _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case = tf.zeros_like(inputs_dict["""attention_mask"""] )
        _snake_case = 2
        _snake_case = tf.where(tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
        _snake_case = True
        _snake_case = self.model_tester.seq_length
        _snake_case = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(UpperCAmelCase ):
            _snake_case = outputs.decoder_attentions
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(UpperCAmelCase ):
            _snake_case = [t.numpy() for t in outputs.encoder_attentions]
            _snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            _snake_case = True
            _snake_case = False
            _snake_case = False
            _snake_case = model_class(UpperCAmelCase )
            _snake_case = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            _snake_case = len(UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
            if self.is_encoder_decoder:
                _snake_case = model_class(UpperCAmelCase )
                _snake_case = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , UpperCAmelCase )
                check_decoder_attentions_output(UpperCAmelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _snake_case = True
            _snake_case = model_class(UpperCAmelCase )
            _snake_case = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
            # Check attention is always last and order is fine
            _snake_case = True
            _snake_case = True
            _snake_case = model_class(UpperCAmelCase )
            _snake_case = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )

    @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
    def lowercase (self ) -> Union[str, Any]:
        pass

    def lowercase (self ) -> Optional[int]:
        # TODO: Head-masking not yet implemented
        pass


def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
    return tf.constant(_SCREAMING_SNAKE_CASE , dtype=tf.intaa )


__lowerCAmelCase = 1E-4


@slow
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowercase (self ) -> List[Any]:
        _snake_case = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
        # change to intended input here
        _snake_case = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        _snake_case = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        _snake_case = prepare_led_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase )
        _snake_case = model(**UpperCAmelCase )[0]
        _snake_case = (1, 1024, 768)
        self.assertEqual(output.shape , UpperCAmelCase )
        # change to expected output here
        _snake_case = tf.convert_to_tensor([[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-3 )

    def lowercase (self ) -> Optional[int]:
        _snake_case = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
        # change to intended input here
        _snake_case = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        _snake_case = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        _snake_case = prepare_led_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase )
        _snake_case = model(**UpperCAmelCase )[0]
        _snake_case = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , UpperCAmelCase )
        # change to expected output here
        _snake_case = tf.convert_to_tensor([[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-3 , rtol=1e-3 )
341
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                _snake_case = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(UpperCAmelCase )

    def lowercase (self ) -> Optional[int]:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowercase (self ) -> Dict:
        _snake_case = """sgugger/tiny-distilbert-classification"""
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , only_pretrain_model=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowercase (self ) -> Optional[Any]:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , torchscript=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def lowercase (self ) -> Optional[int]:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , fpaa=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowercase (self ) -> Union[str, Any]:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
        # set architectures equal to `None`
        _snake_case = None
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowercase (self ) -> Optional[int]:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def lowercase (self ) -> Tuple:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def lowercase (self ) -> Union[str, Any]:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowercase (self ) -> Dict:
        _snake_case = """sshleifer/tinier_bart"""
        _snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def lowercase (self ) -> Any:
        _snake_case = """sshleifer/tiny-gpt2"""
        _snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def lowercase (self ) -> int:
        _snake_case = """sshleifer/tinier_bart"""
        _snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
        _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
        _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def lowercase (self ) -> str:
        _snake_case = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , save_to_csv=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(UpperCAmelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(UpperCAmelCase , """env.csv""" ) , multi_process=UpperCAmelCase , )
            _snake_case = PyTorchBenchmark(UpperCAmelCase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCAmelCase , """env.csv""" ) ).exists() )

    def lowercase (self ) -> int:
        _snake_case = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(UpperCAmelCase ):
            self.assertTrue(hasattr(UpperCAmelCase , """sequential""" ) )
            self.assertTrue(hasattr(UpperCAmelCase , """cumulative""" ) )
            self.assertTrue(hasattr(UpperCAmelCase , """current""" ) )
            self.assertTrue(hasattr(UpperCAmelCase , """total""" ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase , """log.txt""" ) , log_print=UpperCAmelCase , trace_memory_line_by_line=UpperCAmelCase , multi_process=UpperCAmelCase , )
            _snake_case = PyTorchBenchmark(UpperCAmelCase )
            _snake_case = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(UpperCAmelCase , """log.txt""" ) ).exists() )
341
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCAmelCase = {
    'configuration_clipseg': [
        'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPSegConfig',
        'CLIPSegTextConfig',
        'CLIPSegVisionConfig',
    ],
    'processing_clipseg': ['CLIPSegProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase = [
        'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPSegModel',
        'CLIPSegPreTrainedModel',
        'CLIPSegTextModel',
        'CLIPSegVisionModel',
        'CLIPSegForImageSegmentation',
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    __lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
'''simple docstring'''
from __future__ import annotations


def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        return []
    _snake_case, _snake_case = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
    _snake_case = int(max_value - min_value ) + 1
    _snake_case = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
    for i in my_list:
        buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )
    return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
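# --- Editor's note (illustrative, not part of the original sample) ---
# The obfuscation above renames bindings inconsistently: the function is defined
# as `__SCREAMING_SNAKE_CASE` while the __main__ asserts still call `bucket_sort`,
# and the loop appends the argument rather than `i`. With the intended bindings
# restored (min_value/max_value, buckets appending `i`, sorting each bucket), this
# is plain bucket sort: one bucket per unit of the value range, e.g.
# bucket_sort([0.4, 1.2, 0.1, 0.3]) -> [0.1, 0.3, 0.4, 1.2].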
341
1
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


__lowerCAmelCase = logging.get_logger(__name__)

__lowerCAmelCase = {'vocab_file': 'spiece.model'}

__lowerCAmelCase = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}


class _lowerCAmelCase ( __snake_case ):
    '''simple docstring'''

    def __init__(self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<sep>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<eop>", "<eod>"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
        _snake_case = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
        _snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=UpperCAmelCase , remove_space=UpperCAmelCase , keep_accents=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
        _snake_case = 3
        _snake_case = do_lower_case
        _snake_case = remove_space
        _snake_case = keep_accents
        _snake_case = vocab_file
        _snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCAmelCase )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation."""
            )
        _snake_case = jieba
        _snake_case = str.maketrans(""" \n""" , """\u2582\u2583""" )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def lowercase (self ) -> List[Any]:
        return len(self.sp_model )

    def lowercase (self ) -> Optional[int]:
        _snake_case = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ) -> List[Any]:
        _snake_case = self.__dict__.copy()
        _snake_case = None
        return state

    def __setstate__(self , UpperCAmelCase ) -> Optional[Any]:
        _snake_case = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            _snake_case = {}
        _snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase (self , UpperCAmelCase ) -> Optional[Any]:
        if self.remove_space:
            _snake_case = """ """.join(inputs.strip().split() )
        else:
            _snake_case = inputs
        _snake_case = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            _snake_case = unicodedata.normalize("""NFKD""" , UpperCAmelCase )
            _snake_case = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase )] )
        if self.do_lower_case:
            _snake_case = outputs.lower()
        return outputs

    def lowercase (self , UpperCAmelCase ) -> List[str]:
        _snake_case = self.preprocess_text(UpperCAmelCase )
        _snake_case = self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
        _snake_case = []
        for piece in pieces:
            if len(UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                _snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        _snake_case = cur_pieces[1:]
                    else:
                        _snake_case = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(UpperCAmelCase )
            else:
                new_pieces.append(UpperCAmelCase )
        return new_pieces

    def lowercase (self , UpperCAmelCase ) -> str:
        return self.sp_model.PieceToId(UpperCAmelCase )

    def lowercase (self , UpperCAmelCase ) -> List[Any]:
        return self.sp_model.IdToPiece(UpperCAmelCase )

    def lowercase (self , UpperCAmelCase ) -> List[str]:
        _snake_case = """""".join(UpperCAmelCase ).replace(UpperCAmelCase , """ """ ).strip()
        return out_string

    def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
        _snake_case = [self.sep_token_id]
        _snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
        if token_ids_a is not None:
            return ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1, 1]
        return ([0] * len(UpperCAmelCase )) + [1, 1]

    def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
        _snake_case = [self.sep_token_id]
        _snake_case = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        _snake_case = os.path.join(UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCAmelCase , """wb""" ) as fi:
                _snake_case = self.sp_model.serialized_model_proto()
                fi.write(UpperCAmelCase )
        return (out_vocab_file,)

    def lowercase (self , *UpperCAmelCase , **UpperCAmelCase ) -> str:
        _snake_case = super()._decode(*UpperCAmelCase , **UpperCAmelCase )
        _snake_case = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
341
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


__lowerCAmelCase = logging.get_logger(__name__)


class _lowerCAmelCase :
    '''simple docstring'''

    def __init__(self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase=None ) -> int:
        if not conversation_id:
            _snake_case = uuid.uuida()
        if past_user_inputs is None:
            _snake_case = []
        if generated_responses is None:
            _snake_case = []
        _snake_case = conversation_id
        _snake_case = past_user_inputs
        _snake_case = generated_responses
        _snake_case = text

    def __eq__(self , UpperCAmelCase ) -> Dict:
        if not isinstance(UpperCAmelCase , UpperCAmelCase ):
            return False
        if self.uuid == other.uuid:
            return True
        return (self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses)

    def lowercase (self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
        if self.new_user_input:
            if overwrite:
                logger.warning(f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ f"""with: \"{text}\".""" )
                _snake_case = text
            else:
                logger.warning(f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            _snake_case = text

    def lowercase (self ) -> int:
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
            _snake_case = None

    def lowercase (self , UpperCAmelCase ) -> Any:
        self.generated_responses.append(UpperCAmelCase )

    def lowercase (self ) -> List[str]:
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self ) -> Optional[int]:
        _snake_case = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            _snake_case = """user""" if is_user else """bot"""
            output += f"""{name} >> {text} \n"""
        return output


@add_end_docstrings(__snake_case , r"\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n " , )
class _lowerCAmelCase ( __snake_case ):
    '''simple docstring'''

    def __init__(self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
        super().__init__(*UpperCAmelCase , **UpperCAmelCase )
        if self.tokenizer.pad_token_id is None:
            _snake_case = self.tokenizer.eos_token

    def lowercase (self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Dict:
        _snake_case = {}
        _snake_case = {}
        _snake_case = {}
        if min_length_for_response is not None:
            _snake_case = min_length_for_response
        if minimum_tokens is not None:
            _snake_case = minimum_tokens
        if "max_length" in generate_kwargs:
            _snake_case = generate_kwargs["""max_length"""]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            _snake_case = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(UpperCAmelCase )
        return preprocess_params, forward_params, postprocess_params

    def __call__(self , UpperCAmelCase , UpperCAmelCase=0 , **UpperCAmelCase ) -> Union[str, Any]:
        _snake_case = super().__call__(UpperCAmelCase , num_workers=UpperCAmelCase , **UpperCAmelCase )
        if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) == 1:
            return outputs[0]
        return outputs

    def lowercase (self , UpperCAmelCase , UpperCAmelCase=32 ) -> Dict[str, Any]:
        if not isinstance(UpperCAmelCase , UpperCAmelCase ):
            raise ValueError("""ConversationalPipeline expects Conversation as inputs""" )
        if conversation.new_user_input is None:
            raise ValueError(f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ """Add user inputs with the conversation's `add_user_input` method""" )
        if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
            _snake_case = self.tokenizer._build_conversation_input_ids(UpperCAmelCase )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            _snake_case = self._legacy_parse_and_tokenize(UpperCAmelCase )
        if self.framework == "pt":
            _snake_case = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            _snake_case = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def lowercase (self , UpperCAmelCase , UpperCAmelCase=10 , **UpperCAmelCase ) -> Optional[int]:
        _snake_case = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        _snake_case = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            _snake_case = max_length - minimum_tokens
            _snake_case = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                _snake_case = model_inputs["""attention_mask"""][:, -trim:]
        _snake_case = model_inputs.pop("""conversation""" )
        _snake_case = max_length
        _snake_case = self.model.generate(**UpperCAmelCase , **UpperCAmelCase )
        if self.model.config.is_encoder_decoder:
            _snake_case = 1
        else:
            _snake_case = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def lowercase (self , UpperCAmelCase , UpperCAmelCase=True ) -> List[str]:
        _snake_case = model_outputs["""output_ids"""]
        _snake_case = self.tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , )
        _snake_case = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(UpperCAmelCase )
        return conversation

    def lowercase (self , UpperCAmelCase ) -> Dict:
        _snake_case = self.tokenizer.eos_token_id
        _snake_case = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
        if len(UpperCAmelCase ) > self.tokenizer.model_max_length:
            _snake_case = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
341
1
'''simple docstring'''
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


__lowerCAmelCase = 8


def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ):
    _snake_case = x.device
    _snake_case = (x * 255).int().clamp(0 , 255 )
    _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE )
    _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" )
    _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c h w -> b c 1 h w""" )
    _snake_case = ((x & mask) != 0).float()
    _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c d h w -> b (c d) h w""" )
    _snake_case = bits * 2 - 1
    return bits


def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ):
    _snake_case = x.device
    _snake_case = (x > 0).int()
    _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa )
    _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" )
    _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b (c d) h w -> b c d h w""" , d=8 )
    _snake_case = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 255).clamp(0.0 , 1.0 )


def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ):
    if self.num_inference_steps is None:
        raise ValueError("""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand it.
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    _snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    _snake_case = self.alphas_cumprod[timestep]
    _snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    _snake_case = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    _snake_case = self.bit_scale
    if self.config.clip_sample:
        _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    _snake_case = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        _snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    _snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    _snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        _snake_case = model_output.device if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else """cpu"""
        _snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
        _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ** 0.5 * eta * noise
        _snake_case = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE )


def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="epsilon" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ):
    _snake_case = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        _snake_case, _snake_case = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 )
    else:
        _snake_case = None
    # 1. compute alphas, betas
    _snake_case = self.alphas_cumprod[t]
    _snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
    _snake_case = 1 - alpha_prod_t
    _snake_case = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        _snake_case = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
    # 3. Clip "predicted x_0"
    _snake_case = self.bit_scale
    if self.config.clip_sample:
        _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    _snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    _snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    _snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    _snake_case = 0
    if t > 0:
        _snake_case = torch.randn(model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_SCREAMING_SNAKE_CASE ).to(model_output.device )
        _snake_case = (self._get_variance(_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise
    _snake_case = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE )


class _lowerCAmelCase ( __snake_case ):
    '''simple docstring'''

    def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , ) -> Tuple:
        super().__init__()
        _snake_case = bit_scale
        _snake_case = (ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step)
        self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )

    @torch.no_grad()
    def __call__(self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]:
        _snake_case = torch.randn((batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , )
        _snake_case = decimal_to_bits(UpperCAmelCase ) * self.bit_scale
        _snake_case = latents.to(self.device )
        self.scheduler.set_timesteps(UpperCAmelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            _snake_case = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
            # compute the previous noisy sample x_t -> x_t-1
            _snake_case = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
        _snake_case = bits_to_decimal(UpperCAmelCase )
        if output_type == "pil":
            _snake_case = self.numpy_to_pil(UpperCAmelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase )
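# --- Editor's note (illustrative usage sketch, not part of the original sample) ---
# The pipeline class above (obfuscated to `_lowerCAmelCase`) follows the community
# "bit diffusion" recipe: images are encoded as +/-1 bit planes (decimal_to_bits),
# denoised with a DDIM or DDPM step patched to clamp at +/-bit_scale, and decoded
# back (bits_to_decimal). Assuming a UNet trained on 3 * 8 bit-plane channels,
# usage would look roughly like:
#
#     pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#     image = pipe(height=64, width=64, num_inference_steps=50).images[0]
#
# `BitDiffusion`, `unet`, and the 64x64 sizes are placeholders for illustration.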
341
'''simple docstring'''
from math import factorial, radians


def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 18 , _SCREAMING_SNAKE_CASE = 10 ):
    _snake_case = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    _snake_case = radians(_SCREAMING_SNAKE_CASE )
    _snake_case = angle_in_radians
    _snake_case = 3
    _snake_case = -1
    for _ in range(_SCREAMING_SNAKE_CASE ):
        result += (b * (angle_in_radians**a)) / factorial(_SCREAMING_SNAKE_CASE )
        _snake_case = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    __import__('doctest').testmod()
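# --- Editor's note (illustrative, not part of the original sample) ---
# With the intended bindings restored (angle_in_degrees, result, a, b), the
# function computes the Maclaurin series sin(x) = x - x^3/3! + x^5/5! - ... over
# `accuracy` terms and rounds to `rounded_values_count` places, so a
# de-obfuscated maclaurin_sin(30) returns 0.5, matching math.sin(radians(30)).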
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCAmelCase = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' __lowerCAmelCase = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' __lowerCAmelCase = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): def remove_articles(_SCREAMING_SNAKE_CASE ): _snake_case = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE ) return re.sub(_SCREAMING_SNAKE_CASE , """ """ , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE ): _snake_case = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 100 def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = [rgram for rgrams in rgramslist for rgram in rgrams] _snake_case = Counter(_SCREAMING_SNAKE_CASE ) _snake_case = Counter(_SCREAMING_SNAKE_CASE ) 
_snake_case = Counter() for sgram, scount in sgramcounter.items(): _snake_case = scount * numref _snake_case = Counter(_SCREAMING_SNAKE_CASE ) _snake_case = Counter() for cgram, ccount in cgramcounter.items(): _snake_case = ccount * numref # KEEP _snake_case = sgramcounter_rep & cgramcounter_rep _snake_case = keepgramcounter_rep & rgramcounter _snake_case = sgramcounter_rep & rgramcounter _snake_case = 0 _snake_case = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _snake_case = 1 _snake_case = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _snake_case = keeptmpscorea / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _snake_case = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _snake_case = 0 if keepscore_precision > 0 or keepscore_recall > 0: _snake_case = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _snake_case = sgramcounter_rep - cgramcounter_rep _snake_case = delgramcounter_rep - rgramcounter _snake_case = sgramcounter_rep - rgramcounter _snake_case = 0 _snake_case = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _snake_case = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _snake_case = deltmpscorea / len(_SCREAMING_SNAKE_CASE ) # ADDITION _snake_case = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _snake_case = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE ) _snake_case = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE ) _snake_case = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_snake_case = 1 _snake_case = 1 if len(_SCREAMING_SNAKE_CASE ) > 0: _snake_case = addtmpscore / len(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _snake_case = addtmpscore / len(_SCREAMING_SNAKE_CASE ) _snake_case = 0 if addscore_precision > 0 or addscore_recall > 0: _snake_case = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = len(_SCREAMING_SNAKE_CASE ) _snake_case = ssent.split(""" """ ) _snake_case = csent.split(""" """ ) _snake_case = [] _snake_case = [] _snake_case = [] _snake_case = [] _snake_case = [] _snake_case = [] _snake_case = [] _snake_case = [] _snake_case = [] _snake_case = [] for rsent in rsents: _snake_case = rsent.split(""" """ ) _snake_case = [] _snake_case = [] _snake_case = [] ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _snake_case = ragrams[i] + """ """ + ragrams[i + 1] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _snake_case = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] ragrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _snake_case = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3] ragrams.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) ragramslist.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _snake_case = sagrams[i] + """ """ + sagrams[i + 1] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _snake_case = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] sagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _snake_case = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3] sagrams.append(_SCREAMING_SNAKE_CASE ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ): if i < len(_SCREAMING_SNAKE_CASE ) - 1: _snake_case = cagrams[i] + """ """ + cagrams[i + 1] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 2: _snake_case = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] cagrams.append(_SCREAMING_SNAKE_CASE ) if i < len(_SCREAMING_SNAKE_CASE ) - 3: _snake_case = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3] cagrams.append(_SCREAMING_SNAKE_CASE ) ((_snake_case), (_snake_case), (_snake_case)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_snake_case), (_snake_case), (_snake_case)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_snake_case), (_snake_case), (_snake_case)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ((_snake_case), (_snake_case), (_snake_case)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _snake_case = sum([delascore, delascore, delascore, delascore] ) / 4 _snake_case = sum([addascore, addascore, addascore, addascore] ) / 4 _snake_case = (avgkeepscore + avgdelscore + 
avgaddscore) / 3 return finalscore def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "13a" , _SCREAMING_SNAKE_CASE = True ): # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: _snake_case = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _snake_case = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE ) else: _snake_case = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE ) elif tokenizer == "moses": _snake_case = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE ) elif tokenizer == "penn": _snake_case = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE ) else: _snake_case = sentence if not return_str: _snake_case = normalized_sent.split() return normalized_sent def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )): raise ValueError("""Sources length must match predictions and references lengths.""" ) _snake_case = 0 for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] ) _snake_case = sari_score / len(_SCREAMING_SNAKE_CASE ) return 100 * sari_score def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="exp" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , ): _snake_case = len(references[0] ) if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) _snake_case = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )] _snake_case = sacrebleu.corpus_bleu( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowercase (self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=[ """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""", """https://github.com/cocoxu/simplification/blob/master/SARI.py""", 
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""", """https://github.com/mjpost/sacreBLEU""", ] , reference_urls=[ """https://www.aclweb.org/anthology/Q16-1029.pdf""", """https://github.com/mjpost/sacreBLEU""", """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: _snake_case = {} result.update({"""sari""": compute_sari(sources=UpperCAmelCase , predictions=UpperCAmelCase , references=UpperCAmelCase )} ) result.update({"""sacrebleu""": compute_sacrebleu(predictions=UpperCAmelCase , references=UpperCAmelCase )} ) result.update({"""exact""": compute_em(predictions=UpperCAmelCase , references=UpperCAmelCase )} ) return result
'''simple docstring''' import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __lowerCAmelCase = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' __lowerCAmelCase = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n' __lowerCAmelCase = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowercase (self ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ): raise ImportWarning( """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n""" """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[ """https://github.com/m-popovic/chrF""", ] , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = CHRF.CHAR_ORDER , UpperCAmelCase = CHRF.WORD_ORDER , UpperCAmelCase = CHRF.BETA , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , ) -> int: _snake_case = len(references[0] ) if any(len(UpperCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) _snake_case = [[refs[i] for refs in references] for i in range(UpperCAmelCase )] _snake_case = CHRF(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) _snake_case = sb_chrf.corpus_score(UpperCAmelCase , UpperCAmelCase ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
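# Example 1 from the docstring above, run as a standalone script
# (requires `datasets` and `sacrebleu>=1.4.12`).
import datasets

prediction = [
    "The relationship between cats and dogs is not exactly friendly.",
    "a good bookshop is just a genteel black hole that knows how to read.",
]
reference = [
    ["The relationship between dogs and cats is not exactly friendly."],
    ["A good bookshop is just a genteel Black Hole that knows how to read."],
]
chrf = datasets.load_metric("chrf")
print(chrf.compute(predictions=prediction, references=reference))
# {'score': 84.642..., 'char_order': 6, 'word_order': 0, 'beta': 2}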
'''simple docstring'''
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    # Sliding-window maximum sum of k consecutive elements, O(n) time.
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Drop the element leaving the window, add the one entering it.
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}''')
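# A small worked call of the sliding window above (values chosen for illustration):
# windows of size 2 over [1, 4, 2, 10, 2] sum to 5, 6, 12, 12, so the maximum is 12.
assert max_sum_in_array([1, 4, 2, 10, 2], k=2) == 12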
'''simple docstring''' from scipy.stats import spearmanr import datasets __lowerCAmelCase = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' __lowerCAmelCase = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' __lowerCAmelCase = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowercase (self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Optional[Any]: _snake_case = spearmanr(UpperCAmelCase , UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
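# Under the hood this metric is a thin wrapper over `scipy.stats.spearmanr`;
# here is the docstring's second example, called directly (a minimal sketch).
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(rho)               # -0.7
print(round(pvalue, 2))  # 0.19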
'''simple docstring''' from __future__ import annotations from decimal import Decimal from numpy import array def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_SCREAMING_SNAKE_CASE ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix _snake_case = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements _snake_case = [[0.0, 0.0], [0.0, 0.0]] _snake_case, _snake_case = matrix[1][1], matrix[0][0] _snake_case, _snake_case = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_SCREAMING_SNAKE_CASE ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_SCREAMING_SNAKE_CASE ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule _snake_case = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix _snake_case = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] _snake_case = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) _snake_case = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) _snake_case = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) _snake_case = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) _snake_case = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) _snake_case = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) _snake_case = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) _snake_case = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) _snake_case = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) _snake_case = array(_SCREAMING_SNAKE_CASE ) for i in range(3 ): for j in range(3 ): _snake_case = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix _snake_case = array(_SCREAMING_SNAKE_CASE ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_SCREAMING_SNAKE_CASE ) # Calculate the inverse of the matrix return [[float(d(_SCREAMING_SNAKE_CASE ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
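# A quick numerical check of the 2x2 branch (a sketch: `inverse_of_matrix` is
# the upstream name for the function above, which this obfuscated dump renames).
import numpy as np

matrix = [[2.0, 5.0], [1.0, 3.0]]    # determinant = 1
inverse = inverse_of_matrix(matrix)  # hypothetical upstream name
# A matrix times its inverse should be the identity.
assert np.allclose(np.array(matrix) @ np.array(inverse), np.eye(2))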
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[2, 2, 3, 2] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=["stage2", "stage3", "stage4"] , UpperCAmelCase=3 , UpperCAmelCase=None , ) -> List[Any]: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = num_stages _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = intermediate_size _snake_case = hidden_act _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = out_features _snake_case = num_labels _snake_case = scope _snake_case = num_stages def lowercase (self ) -> List[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase (self ) -> Tuple: return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def lowercase (self ) -> Any: return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: _snake_case = UperNetForSemanticSegmentation(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowercase (self ) -> Tuple: _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ), ( _snake_case ), ( _snake_case ), ) = config_and_inputs _snake_case = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowerCAmelCase_ = {"image-segmentation": 
UperNetForSemanticSegmentation} if is_torch_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> Optional[Any]: _snake_case = UperNetModelTester(self ) _snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 ) def lowercase (self ) -> str: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase (self ) -> Union[str, Any]: return def lowercase (self ) -> Union[str, Any]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def lowercase (self ) -> int: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def lowercase (self ) -> int: pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def lowercase (self ) -> List[str]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def lowercase (self ) -> Union[str, Any]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def lowercase (self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowercase (self ) -> str: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase (self ) -> int: pass def lowercase (self ) -> List[str]: def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() _snake_case = _config_zero_init(UpperCAmelCase ) _snake_case = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case = model_class(config=UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def lowercase (self ) -> Optional[Any]: pass @slow def lowercase (self ) -> Tuple: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = UperNetForSemanticSegmentation.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( ): _snake_case = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case = Image.open(_SCREAMING_SNAKE_CASE ).convert("""RGB""" ) return image @require_torch @require_vision @slow class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> Any: _snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(UpperCAmelCase ) _snake_case = prepare_img() _snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) ) def lowercase (self ) -> Any: _snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(UpperCAmelCase ) _snake_case = prepare_img() _snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
'''simple docstring''' import collections import os import re from pathlib import Path __lowerCAmelCase = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase = re.compile(r'^\s*try:') # Catches a line with else: __lowerCAmelCase = re.compile(r'^\s*else:') def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): if _re_test_backend.search(_SCREAMING_SNAKE_CASE ) is None: return None _snake_case = [b[0] for b in _re_backend.findall(_SCREAMING_SNAKE_CASE )] backends.sort() return "_and_".join(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _snake_case = f.readlines() _snake_case = 0 while line_index < len(_SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_SCREAMING_SNAKE_CASE ): return None # First grab the objects without a specific backend in _import_structure _snake_case = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: _snake_case = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ): _snake_case = _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ).groups()[0] _snake_case = re.findall(R"""\[([^\]]+)\]""" , _SCREAMING_SNAKE_CASE ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue _snake_case = _re_import_struct_key_value.search(_SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: _snake_case = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_SCREAMING_SNAKE_CASE ) > 0] objects.extend(_SCREAMING_SNAKE_CASE ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 _snake_case = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
_snake_case = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _snake_case = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _snake_case = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): _snake_case = lines[line_index] if _re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) elif _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ) is not None: _snake_case = _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ ) _snake_case = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0] objects.extend(_SCREAMING_SNAKE_CASE ) elif _re_between_brackets.search(_SCREAMING_SNAKE_CASE ) is not None: _snake_case = _re_between_brackets.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ ) _snake_case = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0] objects.extend(_SCREAMING_SNAKE_CASE ) elif _re_quote_object.search(_SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_quote_object.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 _snake_case = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _snake_case = [] while ( line_index < len(_SCREAMING_SNAKE_CASE ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): _snake_case = lines[line_index] _snake_case = _re_import.search(_SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 _snake_case = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(_SCREAMING_SNAKE_CASE ): # If the line is an if is_backend_available, we grab all objects associated. 
_snake_case = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _snake_case = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _snake_case = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): _snake_case = lines[line_index] _snake_case = _re_import.search(_SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 _snake_case = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): def find_duplicates(_SCREAMING_SNAKE_CASE ): return [k for k, v in collections.Counter(_SCREAMING_SNAKE_CASE ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _snake_case = [] for key in import_dict_objects.keys(): _snake_case = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) _snake_case = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _snake_case = """base imports""" if key == """none""" else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __SCREAMING_SNAKE_CASE ( ): _snake_case = [] for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ): if "__init__.py" in files: _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """__init__.py""" ) _snake_case = parse_init(_SCREAMING_SNAKE_CASE ) if objects is not None: _snake_case = analyze_results(*_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _snake_case = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append("""\n""".join(_SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) > 0: raise ValueError("""\n\n""".join(_SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( ): _snake_case = [] for path, directories, files in os.walk(_SCREAMING_SNAKE_CASE ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(_SCREAMING_SNAKE_CASE ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_SCREAMING_SNAKE_CASE ) / folder).glob("""*.py""" ) ) ) == 0: continue _snake_case = str((Path(_SCREAMING_SNAKE_CASE ) / folder).relative_to(_SCREAMING_SNAKE_CASE ) ) _snake_case = short_path.replace(os.path.sep , """.""" ) submodules.append(_SCREAMING_SNAKE_CASE ) for fname in files: if fname == "__init__.py": continue _snake_case = str((Path(_SCREAMING_SNAKE_CASE ) / fname).relative_to(_SCREAMING_SNAKE_CASE ) ) _snake_case = short_path.replace(""".py""" , """""" 
).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(_SCREAMING_SNAKE_CASE ) return submodules __lowerCAmelCase = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def __SCREAMING_SNAKE_CASE ( ): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import _snake_case = direct_transformers_import(_SCREAMING_SNAKE_CASE ) _snake_case = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(_SCREAMING_SNAKE_CASE , """__init__.py""" ) , """r""" ) as f: _snake_case = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , _SCREAMING_SNAKE_CASE ) ) ) _snake_case = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_SCREAMING_SNAKE_CASE ) > 0: _snake_case = """\n""".join(f"""- {module}""" for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" f"""{list_of_modules}\n""" """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
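# A toy call of the backend parser (a sketch assuming the upstream name
# `find_backend` and its regexes, which the dump's renaming collapses):
# a guarded import line maps to its backend name; anything else maps to None.
print(find_backend("    if not is_torch_available():"))  # 'torch'
print(find_backend("    x = 1"))                         # None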
'''simple docstring''' import argparse from collections import defaultdict def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = f"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(_SCREAMING_SNAKE_CASE , """r""" ) as f: _snake_case = f.readlines() _snake_case = f"""class {class_name}(""" _snake_case = f"""{4 * " "}def {test_name}(""" _snake_case = f"""{8 * " "}{correct_line.split()[0]}""" _snake_case = f"""{16 * " "}{correct_line.split()[0]}""" _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = 0 _snake_case = 0 _snake_case = [] for line in lines: if line.startswith(_SCREAMING_SNAKE_CASE ): _snake_case = True elif in_class and line.startswith(_SCREAMING_SNAKE_CASE ): _snake_case = True elif in_class and in_func and (line.startswith(_SCREAMING_SNAKE_CASE ) or line.startswith(_SCREAMING_SNAKE_CASE )): _snake_case = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _snake_case = True if in_class and in_func and in_line: if ")" not in line: continue else: _snake_case = True if in_class and in_func and in_line and insert_line: new_lines.append(f"""{spaces * " "}{correct_line}""" ) _snake_case = _snake_case = _snake_case = _snake_case = False else: new_lines.append(_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): if fail is not None: with open(_SCREAMING_SNAKE_CASE , """r""" ) as f: _snake_case = {l.strip() for l in f.readlines()} else: _snake_case = None with open(_SCREAMING_SNAKE_CASE , """r""" ) as f: _snake_case = f.readlines() _snake_case = defaultdict(_SCREAMING_SNAKE_CASE ) for line in correct_lines: _snake_case, _snake_case, _snake_case, _snake_case = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('--correct_filename', help='filename of tests with expected result') parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None) __lowerCAmelCase = parser.parse_args() main(args.correct_filename, args.fail_filename)
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = "gpt_neox" def __init__(self , UpperCAmelCase=50432 , UpperCAmelCase=6144 , UpperCAmelCase=44 , UpperCAmelCase=64 , UpperCAmelCase=24576 , UpperCAmelCase="gelu" , UpperCAmelCase=0.25 , UpperCAmelCase=10000 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=2048 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]: super().__init__(bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) _snake_case = vocab_size _snake_case = max_position_embeddings _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = rotary_pct _snake_case = rotary_emb_base _snake_case = attention_dropout _snake_case = hidden_dropout _snake_case = classifier_dropout _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = use_cache _snake_case = tie_word_embeddings _snake_case = use_parallel_residual _snake_case = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" ) def lowercase (self ) -> Union[str, Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f"""got {self.rope_scaling}""" ) _snake_case = self.rope_scaling.get("""type""" , UpperCAmelCase ) _snake_case = self.rope_scaling.get("""factor""" , UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(UpperCAmelCase , UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
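# A minimal instantiation sketch using the upstream class name `GPTNeoXConfig`;
# the printed values echo the defaults in the signature above.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig()
print(config.hidden_size, config.num_attention_heads)  # 6144 64

# rope_scaling must be a dict with type in {"linear", "dynamic"} and factor > 1.0,
# otherwise the validation method above raises ValueError.
scaled = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(scaled.rope_scaling)  # {'type': 'linear', 'factor': 2.0}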
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCAmelCase = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
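# The `_LazyModule` indirection above defers the heavy model imports until
# first attribute access; a usage sketch (requires a transformers version
# recent enough to ship Falcon).
from transformers import FalconConfig  # triggers the lazy import machinery

config = FalconConfig()
print(type(config).__name__)  # FalconConfig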
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    # Net present value: sum of cash_flow_i / (1 + r) ** i over the series.
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
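# A worked call of the formula (numbers chosen for illustration):
# 100 per year for three years at a 10% rate, first payment undiscounted:
# 100/1.1**0 + 100/1.1**1 + 100/1.1**2 = 100 + 90.91 + 82.64 -> 273.55
assert present_value(0.10, [100, 100, 100]) == 273.55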
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCAmelCase = logging.get_logger(__name__) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = ["pixel_values"] def __init__(self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PIL.Image.BICUBIC , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: super().__init__(**UpperCAmelCase ) _snake_case = size if size is not None else {"""height""": 256, """width""": 256} _snake_case = get_size_dict(UpperCAmelCase ) _snake_case = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} _snake_case = get_size_dict(UpperCAmelCase , param_name="""crop_size""" ) _snake_case = do_resize _snake_case = size _snake_case = resample _snake_case = do_center_crop _snake_case = crop_size _snake_case = do_rescale _snake_case = rescale_factor _snake_case = do_normalize _snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PIL.Image.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: _snake_case = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( UpperCAmelCase , size=(size["""height"""], size["""width"""]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: _snake_case = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]: return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image: _snake_case = do_resize if do_resize is not None else self.do_resize _snake_case = resample if resample is not None else self.resample _snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop _snake_case = do_rescale if do_rescale is not None else self.do_rescale _snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor _snake_case = do_normalize if do_normalize is not None else self.do_normalize _snake_case = image_mean if image_mean is not None else self.image_mean _snake_case = image_std if image_std is not None else self.image_std _snake_case = size if size is not None else self.size _snake_case = get_size_dict(UpperCAmelCase ) _snake_case = crop_size if crop_size is not None else self.crop_size _snake_case = get_size_dict(UpperCAmelCase , param_name="""crop_size""" ) _snake_case = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. _snake_case = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: _snake_case = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_center_crop: _snake_case = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images] if do_rescale: _snake_case = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: _snake_case = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] _snake_case = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] _snake_case = {"""pixel_values""": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
341
1
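The image-processor record above applies its transforms in a fixed order: resize, center-crop, rescale, normalize, then a channels-first conversion. Here is a minimal numpy-only sketch of the crop/rescale/normalize/transpose steps (resize is omitted since it needs PIL; the function name and defaults are illustrative, not the Hugging Face API):

import numpy as np

def preprocess(image: np.ndarray, mean: float = 0.5, std: float = 0.5) -> np.ndarray:
    h, w, _ = image.shape                                # expects (H, W, C) uint8
    top, left = (h - 224) // 2, (w - 224) // 2
    image = image[top : top + 224, left : left + 224]    # center crop to 224x224
    image = image.astype(np.float32) * (1 / 255)         # rescale to [0, 1]
    image = (image - mean) / std                         # normalize
    return np.transpose(image, (2, 0, 1))                # HWC -> CHW (ChannelDimension.FIRST)

out = preprocess(np.zeros((256, 256, 3), dtype=np.uint8))
print(out.shape)  # (3, 224, 224)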
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_falcon'] = [
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
'''simple docstring'''

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
341
1
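With the name collisions in the encoder/decoder above repaired, the pair can be cross-checked against the standard library, which uses the same alphabet and padding rules (this assumes `base64_encode` and `base64_decode` are in scope):

import base64

payload = b"Hello, World!"
encoded = base64_encode(payload)
assert encoded == base64.b64encode(payload)   # same output as the stdlib codec
assert base64_decode(encoded) == payload      # round-trip recovers the input
print(encoded)  # b'SGVsbG8sIFdvcmxkIQ=='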
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
341
'''simple docstring'''


def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
341
1
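The GPT-BigCode config a few records above maps canonical names onto legacy ones through `attribute_map` (`hidden_size` resolves to `n_embd`, and so on). Here is a toy sketch of that aliasing mechanism, independent of transformers; the class and parameter names are invented for illustration:

class AliasedConfig:
    # Legacy attribute names are the storage; canonical names are aliases onto them.
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        alias = type(self).attribute_map.get(name)
        if alias is None:
            raise AttributeError(name)
        return getattr(self, alias)

cfg = AliasedConfig(n_embd=1024)
print(cfg.hidden_size)  # 1024, resolved through the alias map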
'''simple docstring'''


def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
341
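Expected behaviour of the repaired decimal-to-binary converter above (this assumes `main` is in scope):

print(main("25"))    # 0b11001
print(main("-13"))   # -0b1101
print(main(8))       # 0b1000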
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
341
'''simple docstring'''

import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
341
1
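The `YolosFeatureExtractor` record above is a deprecation shim: the old class subclasses the new one and warns on construction. A self-contained sketch of the same pattern follows (the class names are invented for illustration):

import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldProcessor(NewProcessor):
    # Old name keeps working, but every construction emits a FutureWarning.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldProcessor(scale=0.5)
print(caught[0].category.__name__)  # FutureWarning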
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self , UpperCAmelCase ) -> Union[str, Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): _snake_case = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(UpperCAmelCase ) def lowercase (self ) -> Optional[int]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Dict: _snake_case = """sgugger/tiny-distilbert-classification""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , only_pretrain_model=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Optional[Any]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , torchscript=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def lowercase (self ) -> Optional[int]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , fpaa=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Union[str, Any]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) # set architectures equal to `None` _snake_case = None _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Optional[int]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" ) def lowercase (self ) -> Tuple: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase (self ) -> Union[str, Any]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Dict: _snake_case = """sshleifer/tinier_bart""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Any: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase (self ) -> int: _snake_case = """sshleifer/tinier_bart""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase (self ) -> str: _snake_case = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , save_to_csv=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(UpperCAmelCase , """train_mem.csv""" ) , 
inference_memory_csv_file=os.path.join(UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(UpperCAmelCase , """env.csv""" ) , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """env.csv""" ) ).exists() ) def lowercase (self ) -> int: _snake_case = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(UpperCAmelCase ): self.assertTrue(hasattr(UpperCAmelCase , """sequential""" ) ) self.assertTrue(hasattr(UpperCAmelCase , """cumulative""" ) ) self.assertTrue(hasattr(UpperCAmelCase , """current""" ) ) self.assertTrue(hasattr(UpperCAmelCase , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase , """log.txt""" ) , log_print=UpperCAmelCase , trace_memory_line_by_line=UpperCAmelCase , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """log.txt""" ) ).exists() )
341
'''simple docstring''' class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ) -> int: _snake_case = data _snake_case = previous _snake_case = next_node def __str__(self ) -> str: return f"""{self.data}""" def lowercase (self ) -> int: return self.data def lowercase (self ) -> Dict: return self.next def lowercase (self ) -> Union[str, Any]: return self.previous class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase ) -> List[str]: _snake_case = head def __iter__(self ) -> Optional[Any]: return self def lowercase (self ) -> str: if not self.current: raise StopIteration else: _snake_case = self.current.get_data() _snake_case = self.current.get_next() return value class _lowerCAmelCase : '''simple docstring''' def __init__(self ) -> Optional[int]: _snake_case = None # First node in list _snake_case = None # Last node in list def __str__(self ) -> Optional[int]: _snake_case = self.head _snake_case = [] while current is not None: nodes.append(current.get_data() ) _snake_case = current.get_next() return " ".join(str(UpperCAmelCase ) for node in nodes ) def __contains__(self , UpperCAmelCase ) -> int: _snake_case = self.head while current: if current.get_data() == value: return True _snake_case = current.get_next() return False def __iter__(self ) -> Union[str, Any]: return LinkedListIterator(self.head ) def lowercase (self ) -> str: if self.head: return self.head.get_data() return None def lowercase (self ) -> List[Any]: if self.tail: return self.tail.get_data() return None def lowercase (self , UpperCAmelCase ) -> None: if self.head is None: _snake_case = node _snake_case = node else: self.insert_before_node(self.head , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> None: if self.head is None: self.set_head(UpperCAmelCase ) else: self.insert_after_node(self.tail , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> None: _snake_case = Node(UpperCAmelCase ) if self.head is None: self.set_head(UpperCAmelCase ) else: self.set_tail(UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = node _snake_case = node.previous if node.get_previous() is None: _snake_case = node_to_insert else: _snake_case = node_to_insert _snake_case = node_to_insert def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = node _snake_case = node.next if node.get_next() is None: _snake_case = node_to_insert else: _snake_case = node_to_insert _snake_case = node_to_insert def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = 1 _snake_case = Node(UpperCAmelCase ) _snake_case = self.head while node: if current_position == position: self.insert_before_node(UpperCAmelCase , UpperCAmelCase ) return current_position += 1 _snake_case = node.next self.insert_after_node(self.tail , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> Node: _snake_case = self.head while node: if node.get_data() == item: return node _snake_case = node.get_next() raise Exception("""Node not found""" ) def lowercase (self , UpperCAmelCase ) -> Optional[int]: if (node := self.get_node(UpperCAmelCase )) is not None: if node == self.head: _snake_case = self.head.get_next() if node == self.tail: _snake_case = self.tail.get_previous() self.remove_node_pointers(UpperCAmelCase ) @staticmethod def lowercase (UpperCAmelCase ) -> None: if node.get_next(): _snake_case = node.previous if node.get_previous(): _snake_case = node.next _snake_case = None 
_snake_case = None def lowercase (self ) -> Dict: return self.head is None def __SCREAMING_SNAKE_CASE ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
341
1
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = {'vocab_file': 'vocab.txt'} __lowerCAmelCase = { 'vocab_file': { 'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt', 'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt', }, } __lowerCAmelCase = { 'facebook/esm2_t6_8M_UR50D': 1_024, 'facebook/esm2_t12_35M_UR50D': 1_024, } def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): with open(_SCREAMING_SNAKE_CASE , """r""" ) as f: _snake_case = f.read().splitlines() return [l.strip() for l in lines] class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ["input_ids", "attention_mask"] def __init__(self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> Any: super().__init__(**UpperCAmelCase ) _snake_case = load_vocab_file(UpperCAmelCase ) _snake_case = dict(enumerate(self.all_tokens ) ) _snake_case = {tok: ind for ind, tok in enumerate(self.all_tokens )} _snake_case = unk_token _snake_case = cls_token _snake_case = pad_token _snake_case = mask_token _snake_case = eos_token _snake_case = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def lowercase (self , UpperCAmelCase ) -> str: return self._id_to_token.get(UpperCAmelCase , self.unk_token ) def lowercase (self , UpperCAmelCase ) -> int: return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) ) def lowercase (self , UpperCAmelCase , **UpperCAmelCase ) -> Dict: return text.split() def lowercase (self , UpperCAmelCase=False ) -> List[str]: return len(self._id_to_token ) def lowercase (self ) -> Dict: return {token: i for i, token in enumerate(self.all_tokens )} def lowercase (self , UpperCAmelCase ) -> int: return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) ) def lowercase (self , UpperCAmelCase ) -> str: return self._id_to_token.get(UpperCAmelCase , self.unk_token ) def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: _snake_case = [self.cls_token_id] _snake_case = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] _snake_case = [1] + ([0] * len(UpperCAmelCase )) + [1] if token_ids_a is not None: mask += [0] * len(UpperCAmelCase ) + [1] return mask def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> 
Optional[Any]: _snake_case = os.path.join(UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" ) with open(UpperCAmelCase , """w""" ) as f: f.write("""\n""".join(self.all_tokens ) ) return (vocab_file,) @property def lowercase (self ) -> int: return self.get_vocab_size(with_added_tokens=UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase = False ) -> int: return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
341
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput __lowerCAmelCase = 8 def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): _snake_case = x.device _snake_case = (x * 255).int().clamp(0 , 255 ) _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c h w -> b c 1 h w""" ) _snake_case = ((x & mask) != 0).float() _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c d h w -> b (c d) h w""" ) _snake_case = bits * 2 - 1 return bits def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): _snake_case = x.device _snake_case = (x > 0).int() _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b (c d) h w -> b c d h w""" , d=8 ) _snake_case = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" ) return (dec / 255).clamp(0.0 , 1.0 ) def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _snake_case = self.alphas_cumprod[timestep] _snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _snake_case = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _snake_case = self.bit_scale if self.config.clip_sample: _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _snake_case = model_output.device if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else """cpu""" _snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ** 0.5 * eta * noise _snake_case = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="epsilon" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): _snake_case = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _snake_case, _snake_case = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 ) else: _snake_case = None # 1. compute alphas, betas _snake_case = self.alphas_cumprod[t] _snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one _snake_case = 1 - alpha_prod_t _snake_case = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _snake_case = model_output else: raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" _snake_case = self.bit_scale if self.config.clip_sample: _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _snake_case = 0 if t > 0: _snake_case = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_SCREAMING_SNAKE_CASE ).to(model_output.device ) _snake_case = (self._get_variance(_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise _snake_case = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , ) -> Tuple: super().__init__() _snake_case = bit_scale _snake_case = ( ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step ) self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__(self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]: _snake_case = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , ) _snake_case = decimal_to_bits(UpperCAmelCase ) * self.bit_scale _snake_case = latents.to(self.device ) self.scheduler.set_timesteps(UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _snake_case = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _snake_case = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample _snake_case = bits_to_decimal(UpperCAmelCase ) if output_type == "pil": _snake_case = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
341
1
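The bit-diffusion pipeline above packs uint8 images into eight bit planes in {-1, +1} and unpacks them again (`decimal_to_bits`/`bits_to_decimal`). Here is a standalone numpy sketch of that encoding, without torch or einops; the function names are illustrative:

import numpy as np

def to_bits(x: np.ndarray) -> np.ndarray:
    # x: uint8 array; returns one extra trailing axis of 8 bit planes in {-1, +1}
    planes = ((x[..., None] >> np.arange(7, -1, -1)) & 1).astype(np.float32)
    return planes * 2 - 1  # {0, 1} -> {-1, +1}

def from_bits(b: np.ndarray) -> np.ndarray:
    bits = (b > 0).astype(np.uint8)
    return (bits << np.arange(7, -1, -1)).sum(axis=-1).astype(np.uint8)

x = np.array([0, 1, 127, 255], dtype=np.uint8)
assert (from_bits(to_bits(x)) == x).all()  # lossless round trip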
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path / """cache""" _snake_case = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read() _check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path / """cache""" _snake_case = {"""text""": """string"""} _snake_case = features.copy() if features else default_expected_features _snake_case = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case = TextDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path / """cache""" _snake_case = {"""text""": """string"""} _snake_case = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read() _check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = text_path elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = [text_path] _snake_case = tmp_path / """cache""" _snake_case = {"""text""": """string"""} _snake_case = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=("train",) ): assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for split in splits: _snake_case = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE 
, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path / """cache""" _snake_case = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case = TextDatasetReader({"""train""": text_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read() _check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" _snake_case = {"""text""": """string"""} _snake_case = features.copy() if features else default_expected_features _snake_case = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case = TextDatasetReader({"""train""": text_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if split: _snake_case = {split: text_path} else: _snake_case = """train""" _snake_case = {"""train""": text_path, """test""": text_path} _snake_case = tmp_path / """cache""" _snake_case = {"""text""": """string"""} _snake_case = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
341
'''simple docstring'''


def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f'''{solution() = }''')
341
1
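The repaired Project Euler solution above generates the perimeters of almost-equilateral Heronian triangles through a linear recurrence; the first values it produces are 16, 50, 196, 722, ... A hand-checkable call with a small cap (assumes `solution` is in scope):

print(solution(100))  # 66: only the perimeters 16 and 50 fit under the cap
print(solution(200))  # 262 = 16 + 50 + 196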
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __lowerCAmelCase = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __lowerCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __lowerCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __lowerCAmelCase = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) __lowerCAmelCase = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions __lowerCAmelCase = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) __lowerCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image) __lowerCAmelCase = np.expand_dims(test_image, axis=0) __lowerCAmelCase = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __lowerCAmelCase = 'Normal' if result[0][0] == 1: __lowerCAmelCase = 'Abnormality detected'
341
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = "deberta-v2" def __init__(self , UpperCAmelCase=128100 , UpperCAmelCase=1536 , UpperCAmelCase=24 , UpperCAmelCase=24 , UpperCAmelCase=6144 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-7 , UpperCAmelCase=False , UpperCAmelCase=-1 , UpperCAmelCase=0 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=0 , UpperCAmelCase="gelu" , **UpperCAmelCase , ) -> List[str]: super().__init__(**UpperCAmelCase ) _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = relative_attention _snake_case = max_relative_positions _snake_case = pad_token_id _snake_case = position_biased_input # Backwards compatibility if type(UpperCAmelCase ) == str: _snake_case = [x.strip() for x in pos_att_type.lower().split("""|""" )] _snake_case = pos_att_type _snake_case = vocab_size _snake_case = layer_norm_eps _snake_case = kwargs.get("""pooler_hidden_size""" , UpperCAmelCase ) _snake_case = pooler_dropout _snake_case = pooler_hidden_act class _lowerCAmelCase ( __snake_case ): '''simple docstring''' @property def lowercase (self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def lowercase (self ) -> int: return 12 def lowercase (self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 3 , UpperCAmelCase = 40 , UpperCAmelCase = 40 , UpperCAmelCase = None , ) -> Mapping[str, Any]: _snake_case = super().generate_dummy_inputs(preprocessor=UpperCAmelCase , framework=UpperCAmelCase ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
341
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    'configuration_audio_spectrogram_transformer': [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ASTConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ASTForAudioClassification',
        'ASTModel',
        'ASTPreTrainedModel',
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
'''simple docstring'''

ROMAN = [
    (1_000, 'M'),
    (900, 'CM'),
    (500, 'D'),
    (400, 'CD'),
    (100, 'C'),
    (90, 'XC'),
    (50, 'L'),
    (40, 'XL'),
    (10, 'X'),
    (9, 'IX'),
    (5, 'V'),
    (4, 'IV'),
    (1, 'I'),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
341
1
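Quick round-trip checks for the repaired Roman-numeral converters above (assumes both functions are in scope):

assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert roman_to_int("XLIX") == 49 and int_to_roman(49) == "XLIX"
print("ok")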
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowerCAmelCase = 16 __lowerCAmelCase = 32 def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = "bert-base-cased" ): _snake_case = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _snake_case = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(_SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _snake_case = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _snake_case = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. _snake_case = DataLoader( tokenized_datasets["""train"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _snake_case = DataLoader( tokenized_datasets["""validation"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): model.eval() _snake_case = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
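# ---------------------------------------------------------------------------
# Editor's note (added): the save/resume pattern used by the script above,
# reduced to a self-contained sketch. The "epoch_{n}" folder naming is the
# script's own convention, not something Accelerate requires; the model and
# optimizer here are toy stand-ins.
# ---------------------------------------------------------------------------
import os
import tempfile

import torch
from accelerate import Accelerator


def checkpoint_roundtrip_demo() -> int:
    accelerator = Accelerator()
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    model, optimizer = accelerator.prepare(model, optimizer)

    with tempfile.TemporaryDirectory() as output_dir:
        epoch = 0
        # Save: one folder per epoch, named so the epoch can be parsed back out.
        accelerator.save_state(os.path.join(output_dir, f"epoch_{epoch}"))
        # Resume: restore model/optimizer state, then continue at epoch + 1.
        accelerator.load_state(os.path.join(output_dir, f"epoch_{epoch}"))
    return epoch + 1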
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
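# ---------------------------------------------------------------------------
# Editor's note (added): the `_LazyModule` registration above defers the heavy
# submodule imports until an attribute is first accessed. A simplified sketch
# of the same idea using PEP 562 module-level __getattr__ (an illustration,
# not transformers' actual implementation):
# ---------------------------------------------------------------------------
import importlib

_demo_import_structure = {"math": ["sqrt"], "json": ["dumps"]}  # submodule -> exported names


def __getattr__(name):  # only runs when `name` is not found the normal way
    for module_name, exported in _demo_import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")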
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __lowerCAmelCase = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ): if attention_mask is None: _snake_case = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _snake_case = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _snake_case = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=99 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=0.02 , ) -> Union[str, Any]: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = eos_token_id _snake_case = pad_token_id _snake_case = bos_token_id _snake_case = initializer_range def lowercase (self ) -> str: _snake_case = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _snake_case = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 ) _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase , ) _snake_case = prepare_blenderbot_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def lowercase (self ) -> Dict: _snake_case, _snake_case = self.prepare_config_and_inputs() return config, inputs_dict def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: _snake_case = 20 _snake_case = model_class_name(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] ) _snake_case, _snake_case = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) _snake_case = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _snake_case = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _snake_case = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , ) _snake_case = model.decode(UpperCAmelCase , UpperCAmelCase ) _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: _snake_case = 20 _snake_case = model_class_name(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] ) _snake_case, _snake_case = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _snake_case = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) _snake_case = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _snake_case = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase ) _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = 99 def lowercase (self ) -> Any: _snake_case = np.array( [ 
[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _snake_case = input_ids.shape[0] _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase (self ) -> Optional[Any]: _snake_case, _snake_case, _snake_case = self._get_config_and_data() _snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) _snake_case = lm_model(input_ids=UpperCAmelCase ) _snake_case = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase ) def lowercase (self ) -> int: _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) _snake_case = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _snake_case = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _snake_case = lm_model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase ) _snake_case = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase ) def lowercase (self ) -> Tuple: _snake_case = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 ) _snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() _snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(UpperCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _lowerCAmelCase ( __snake_case , unittest.TestCase , __snake_case ): '''simple docstring''' lowerCAmelCase_ = True lowerCAmelCase_ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowercase (self ) -> Any: _snake_case = FlaxBlenderbotModelTester(self ) def lowercase (self ) -> str: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Dict: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) _snake_case = 
model_class(UpperCAmelCase ) @jax.jit def encode_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase ) with self.subTest("""JIT Enabled""" ): _snake_case = encode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _snake_case = encode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase (self ) -> str: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case = model_class(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _snake_case = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): return model.decode( decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , ) with self.subTest("""JIT Enabled""" ): _snake_case = decode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _snake_case = decode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase (self ) -> Any: for model_class_name in self.all_model_classes: _snake_case = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _snake_case = np.ones((1, 1) ) * model.config.eos_token_id _snake_case = model(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" ) @slow def lowercase (self ) -> Dict: _snake_case = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25} _snake_case = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} _snake_case = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=UpperCAmelCase ) _snake_case = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" ) _snake_case = ["""Sam"""] _snake_case = tokenizer(UpperCAmelCase , return_tensors="""jax""" ) _snake_case = model.generate(**UpperCAmelCase , **UpperCAmelCase ) _snake_case = """Sam is a great name. It means \"sun\" in Gaelic.""" _snake_case = tokenizer.batch_decode(UpperCAmelCase , **UpperCAmelCase ) assert generated_txt[0].strip() == tgt_text
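# ---------------------------------------------------------------------------
# Editor's note (added): the contract `shift_tokens_right` is tested for above,
# shown as plain numpy -- every token moves one slot right, the decoder-start
# token fills slot 0, and -100 sentinels become padding. An illustration of
# the expected behaviour, not the library code itself.
# ---------------------------------------------------------------------------
import numpy as np


def shift_tokens_right_demo(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)


ids = np.array([[71, 82, 18, 33, 2, 1, 1]], dtype=np.int64)
print(shift_tokens_right_demo(ids, pad_token_id=1, decoder_start_token_id=2))
# -> [[ 2 71 82 18 33  2  1]]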
'''simple docstring'''
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
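# ---------------------------------------------------------------------------
# Editor's note (added): the register_subcommand pattern the CLI above is built
# on, demonstrated with a toy command. Names here (EchoCommand, "toy-cli") are
# illustrative, not part of the real CLI.
# ---------------------------------------------------------------------------
from argparse import ArgumentParser


class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo", help="Print a message")
        parser.add_argument("message")
        parser.set_defaults(func=lambda args: print(args.message))


def demo():
    parser = ArgumentParser("toy-cli", usage="toy-cli <command> [<args>]")
    subparsers = parser.add_subparsers(help="toy-cli command helpers")
    EchoCommand.register_subcommand(subparsers)
    args = parser.parse_args(["echo", "hello"])
    args.func(args)  # the real CLI instead builds a command object and calls .run()


if __name__ == "__main__":
    demo()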
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=10 , UpperCAmelCase=3 , UpperCAmelCase=32 * 4 , UpperCAmelCase=32 * 6 , UpperCAmelCase=4 , UpperCAmelCase=32 , ) -> Optional[Any]: _snake_case = parent _snake_case = batch_size _snake_case = is_training _snake_case = use_auxiliary_loss _snake_case = num_queries _snake_case = num_channels _snake_case = min_size _snake_case = max_size _snake_case = num_labels _snake_case = mask_feature_size def lowercase (self ) -> str: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( UpperCAmelCase ) _snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase ) _snake_case = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase ) > 0.5 ).float() _snake_case = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase ) > 0.5).long() _snake_case = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase (self ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase (self ) -> Optional[Any]: _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.prepare_config_and_inputs() _snake_case = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int: _snake_case = output.encoder_hidden_states _snake_case = output.pixel_decoder_hidden_states _snake_case = output.transformer_decoder_hidden_states self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase ) , config.decoder_config.decoder_layers ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]: with torch.no_grad(): _snake_case = MaskFormerModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase ) _snake_case = model(UpperCAmelCase , output_hidden_states=UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of 
the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(UpperCAmelCase , UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: _snake_case = MaskFormerForInstanceSegmentation(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() def comm_check_on_output(UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase ) _snake_case = model(UpperCAmelCase ) comm_check_on_output(UpperCAmelCase ) _snake_case = model( pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ) comm_check_on_output(UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowerCAmelCase_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> int: _snake_case = MaskFormerModelTester(self ) _snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase ) def lowercase (self ) -> int: self.config_tester.run_common_tests() def lowercase (self ) -> List[Any]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase ) def lowercase (self ) -> Any: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def lowercase (self ) -> Optional[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def lowercase (self ) -> Optional[int]: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def lowercase (self ) -> int: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def lowercase (self ) -> Optional[int]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers 
using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowercase (self ) -> Optional[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase (self ) -> Tuple: pass def lowercase (self ) -> List[str]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) @slow def lowercase (self ) -> int: for model_name in ["facebook/maskformer-swin-small-coco"]: _snake_case = MaskFormerModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def lowercase (self ) -> Tuple: _snake_case = (self.model_tester.min_size,) * 2 _snake_case = { """pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase ), """class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase ).long(), } _snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase ) _snake_case = model(**UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def lowercase (self ) -> Dict: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ).to(UpperCAmelCase ) _snake_case = model(**UpperCAmelCase , output_attentions=UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def lowercase (self ) -> Tuple: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _snake_case = self.all_model_classes[1] _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() _snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ).loss loss.backward() def lowercase (self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss _snake_case = self.all_model_classes[1] _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() _snake_case = True _snake_case = True _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() _snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ) _snake_case = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _snake_case = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _snake_case = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _snake_case = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) 
self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __lowerCAmelCase = 1E-4 def __SCREAMING_SNAKE_CASE ( ): _snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase (self ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def lowercase (self ) -> str: _snake_case = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) _snake_case = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) _snake_case = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) _snake_case = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def lowercase (self ) -> List[str]: _snake_case = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(UpperCAmelCase ) .eval() ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) _snake_case = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) # masks_queries_logits _snake_case = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _snake_case = [ [-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942], ] _snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) # class_queries_logits _snake_case = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _snake_case = torch.tensor( [ [1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0], [3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0], [1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0], ] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , 
atol=UpperCAmelCase ) ) def lowercase (self ) -> List[Any]: _snake_case = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(UpperCAmelCase ) .eval() ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) _snake_case = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) # masks_queries_logits _snake_case = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _snake_case = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] _snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) # class_queries_logits _snake_case = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _snake_case = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def lowercase (self ) -> Tuple: _snake_case = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(UpperCAmelCase ) .eval() ) _snake_case = self.default_image_processor _snake_case = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) _snake_case = inputs["""pixel_values"""].to(UpperCAmelCase ) _snake_case = [el.to(UpperCAmelCase ) for el in inputs["""mask_labels"""]] _snake_case = [el.to(UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
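# ---------------------------------------------------------------------------
# Editor's note (added): how the two MaskFormer outputs asserted above combine
# at inference -- per-query class probabilities weight per-query masks to give
# a per-pixel semantic map. A simplified sketch of the post-processing idea
# with random stand-in tensors; shapes mirror the tests' assertions.
# ---------------------------------------------------------------------------
import torch

num_queries, num_labels, h, w = 100, 150, 200, 272
class_queries_logits = torch.randn(1, num_queries, num_labels + 1)  # +1 "null" class
masks_queries_logits = torch.randn(1, num_queries, h, w)

class_probs = class_queries_logits.softmax(dim=-1)[..., :-1]  # drop the null class
mask_probs = masks_queries_logits.sigmoid()
segmentation = torch.einsum("bqc,bqhw->bchw", class_probs, mask_probs)
semantic_map = segmentation.argmax(dim=1)  # (1, h, w) per-pixel label ids
print(semantic_map.shape)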
'''simple docstring'''
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
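# ---------------------------------------------------------------------------
# Editor's note (added): the slow/fast agreement the tests above encode, as a
# standalone check. Downloading the junnyu checkpoint from the Hub is assumed;
# skip when offline.
# ---------------------------------------------------------------------------
from transformers import RoFormerTokenizer, RoFormerTokenizerFast

text = "永和服装饰品有限公司,今天天气非常好"
slow = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
fast = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
assert slow.tokenize(text) == fast.tokenize(text)
assert slow(text)["input_ids"] == fast(text)["input_ids"]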
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self , UpperCAmelCase ) -> Union[str, Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): _snake_case = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(UpperCAmelCase ) def lowercase (self ) -> Optional[int]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Dict: _snake_case = """sgugger/tiny-distilbert-classification""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , only_pretrain_model=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Optional[Any]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , torchscript=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def lowercase (self ) -> Optional[int]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , fpaa=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Union[str, Any]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) # set architectures equal to `None` _snake_case = None _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Optional[int]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , 
training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" ) def lowercase (self ) -> Tuple: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase (self ) -> Union[str, Any]: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Dict: _snake_case = """sshleifer/tinier_bart""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def lowercase (self ) -> Any: _snake_case = """sshleifer/tiny-gpt2""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase (self ) -> int: _snake_case = """sshleifer/tinier_bart""" _snake_case = AutoConfig.from_pretrained(UpperCAmelCase ) _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] ) _snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def lowercase (self ) -> str: _snake_case = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , save_to_csv=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(UpperCAmelCase , """train_mem.csv""" ) , 
inference_memory_csv_file=os.path.join(UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(UpperCAmelCase , """env.csv""" ) , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """env.csv""" ) ).exists() ) def lowercase (self ) -> int: _snake_case = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(UpperCAmelCase ): self.assertTrue(hasattr(UpperCAmelCase , """sequential""" ) ) self.assertTrue(hasattr(UpperCAmelCase , """cumulative""" ) ) self.assertTrue(hasattr(UpperCAmelCase , """current""" ) ) self.assertTrue(hasattr(UpperCAmelCase , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase , """log.txt""" ) , log_print=UpperCAmelCase , trace_memory_line_by_line=UpperCAmelCase , multi_process=UpperCAmelCase , ) _snake_case = PyTorchBenchmark(UpperCAmelCase ) _snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(UpperCAmelCase , """log.txt""" ) ).exists() )
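# ---------------------------------------------------------------------------
# Editor's note (added): the smallest end-to-end use of the benchmark utility
# the tests above exercise. Arguments mirror the tests' own calls; downloading
# the tiny model is assumed, and this benchmark API is deprecated in recent
# transformers releases.
# ---------------------------------------------------------------------------
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)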
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[2, 2, 3, 2] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=["stage2", "stage3", "stage4"] , UpperCAmelCase=3 , UpperCAmelCase=None , ) -> List[Any]: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = num_stages _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = intermediate_size _snake_case = hidden_act _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = out_features _snake_case = num_labels _snake_case = scope _snake_case = num_stages def lowercase (self ) -> List[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase (self ) -> Tuple: return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def lowercase (self ) -> Any: return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: _snake_case = UperNetForSemanticSegmentation(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowercase (self ) -> Tuple: _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ), ( _snake_case ), ( _snake_case ), ) = config_and_inputs _snake_case = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowerCAmelCase_ = {"image-segmentation": 
UperNetForSemanticSegmentation} if is_torch_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> Optional[Any]: _snake_case = UperNetModelTester(self ) _snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 ) def lowercase (self ) -> str: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase (self ) -> Union[str, Any]: return def lowercase (self ) -> Union[str, Any]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def lowercase (self ) -> int: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def lowercase (self ) -> int: pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def lowercase (self ) -> List[str]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def lowercase (self ) -> Union[str, Any]: pass @unittest.skip(reason="""UperNet does not have a base model""" ) def lowercase (self ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowercase (self ) -> str: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase (self ) -> int: pass def lowercase (self ) -> List[str]: def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() _snake_case = _config_zero_init(UpperCAmelCase ) _snake_case = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: _snake_case = model_class(config=UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip(reason="""UperNet does not have tied weights""" ) def lowercase (self ) -> Optional[Any]: pass @slow def lowercase (self ) -> Tuple: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = UperNetForSemanticSegmentation.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( ): _snake_case = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) _snake_case = Image.open(_SCREAMING_SNAKE_CASE ).convert("""RGB""" ) return image @require_torch @require_vision @slow class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> Any: _snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) _snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(UpperCAmelCase ) _snake_case = prepare_img() _snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) ) def lowercase (self ) -> Any: _snake_case = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) _snake_case = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(UpperCAmelCase ) _snake_case = prepare_img() _snake_case = processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) _snake_case = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
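# ---------------------------------------------------------------------------
# Editor's note (added): the UperNet tests above assert logits of shape
# (batch, num_labels, H, W); the usual final step for semantic segmentation is
# an argmax over the label axis. Random logits stand in for model output, and
# 150 labels is just an illustrative ADE20k-sized choice.
# ---------------------------------------------------------------------------
import torch

logits = torch.randn(1, 150, 512, 512)  # shape asserted in the tests above
pred = logits.argmax(dim=1)             # (1, 512, 512) per-pixel label ids
print(pred.shape, pred.dtype)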
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    # Scatter each element into the bucket selected by its offset from the minimum.
    for i in my_list:
        buckets[int(i - min_value)].append(i)

    # Gather: sort each bucket individually and concatenate the results.
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]

UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    # Accept both full unit names ("kilometers") and symbols ("km").
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    # Shift the decimal point by the difference of the two base-ten exponents.
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()

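A quick sanity check of length_conversion (these calls are an added illustration, not part of the original module; the expected values follow directly from the exponent table above):

assert length_conversion(1000, "meter", "kilometer") == 1.0
assert length_conversion(1, "kilometer", "meter") == 1000.0
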
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids

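A minimal usage sketch for the pipeline above (added for illustration; it assumes the transformers pipeline factory and that a conversational checkpoint such as microsoft/DialoGPT-medium can be downloaded):

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
conversation = Conversation("Going to the movies tonight, any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])
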
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    # Sum every number below n that is a palindrome in both base 10 and base 2.
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))

from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    # Maclaurin series: sin(x) = x - x^3/3! + x^5/5! - ...
    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()

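The module runs testmod() but defines no doctests; the spot checks below are an added sketch, using math.sin as the reference implementation:

from math import radians as deg_to_rad, sin as exact_sin

assert abs(sin(30.0) - 0.5) < 1e-9
assert abs(sin(137.0) - exact_sin(deg_to_rad(137.0))) < 1e-9
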
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


_CITATION = r"""
@inproceedings{popovic-2015-chrf,
    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W15-3049",
    doi = "10.18653/v1/W15-3049",
    pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
    title = "chr{F}++: words helping character n-grams",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Second Conference on Machine Translation",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4770",
    doi = "10.18653/v1/W17-4770",
    pages = "612--618",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2,
        ...                        lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # Transpose the references so that sacrebleu sees one list per reference set.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }

import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass

from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
    title={CRC standard probability and statistics tables and formulae},
    author={Kokoska, Stephen and Zwillinger, Daniel},
    year={2000},
    publisher={Crc Press}
}
@article{2020SciPy-NMeth,
    author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
               Haberland, Matt and Reddy, Tyler and Cournapeau, David and
               Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
               Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
               Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
               Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
               Kern, Robert and Larson, Eric and Carey, C J and
               Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
               {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
               Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
               Harris, Charles R. and Archibald, Anne M. and
               Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
               {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
    title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
               Computing in Python}},
    journal = {Nature Methods},
    year    = {2020},
    volume  = {17},
    pages   = {261--272},
    adsurl  = {https://rdcu.be/b08Wh},
    doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}

def longest_distance(graph):
    # Kahn's algorithm: process vertices in topological order and relax distances.
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)

ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    # The check letter is the number modulo 23, looked up in the official table.
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

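A few illustrative checks (added, not in the original file); 12345678Z is the textbook valid DNI:

assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-Z")
assert not is_spain_national_id("12345678A")
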
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)

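An example invocation (the script and file names below are placeholders; the corrections-file format follows from the split(";") above):

# python overwrite_expected_values.py --correct_filename corrected.txt --fail_filename failures.txt
# where each line of corrected.txt reads: file;class_name;test_name;correct_line
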
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    # A leaf of the game tree: return its score.
    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )

    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)

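An example invocation of the converter (the script name and paths are placeholders; the TF checkpoint directory must contain parameters.json, as checked above):

# python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir /path/to/tf_checkpoint --output gptsan_japanese.pt
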
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

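A short usage sketch for the processor above (an added example; the dummy image and NumPy round-trip are illustrative, not part of the original module):

import numpy as np
from PIL import Image

processor = DeiTImageProcessor()
dummy_image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
batch = processor(images=dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default crop_size
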
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = "beit" def __init__(self , UpperCAmelCase=8192 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=224 , UpperCAmelCase=16 , UpperCAmelCase=3 , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=True , UpperCAmelCase=[3, 5, 7, 11] , UpperCAmelCase=[1, 2, 3, 6] , UpperCAmelCase=True , UpperCAmelCase=0.4 , UpperCAmelCase=256 , UpperCAmelCase=1 , UpperCAmelCase=False , UpperCAmelCase=255 , **UpperCAmelCase , ) -> str: super().__init__(**UpperCAmelCase ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = use_mask_token _snake_case = use_absolute_position_embeddings _snake_case = use_relative_position_bias _snake_case = use_shared_relative_position_bias _snake_case = layer_scale_init_value _snake_case = drop_path_rate _snake_case = use_mean_pooling # decode head attributes (semantic segmentation) _snake_case = out_indices _snake_case = pool_scales # auxiliary head attributes (semantic segmentation) _snake_case = use_auxiliary_head _snake_case = auxiliary_loss_weight _snake_case = auxiliary_channels _snake_case = auxiliary_num_convs _snake_case = auxiliary_concat_input _snake_case = semantic_loss_ignore_index class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = version.parse("1.11" ) @property def lowercase (self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowercase (self ) -> float: return 1e-4
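A short usage sketch for a BEiT-style configuration like the one above, written against the released BeitConfig from transformers rather than the obfuscated class names (assuming transformers is installed):

from transformers import BeitConfig

config = BeitConfig(image_size=224, patch_size=16, num_hidden_layers=12)
print(config.hidden_size)   # 768 by default
print(config.out_indices)   # decode-head attribute, [3, 5, 7, 11] by default
config.save_pretrained("./beit-config")            # writes config.json
restored = BeitConfig.from_pretrained("./beit-config")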
'''simple docstring'''
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def base64_encode(data):
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data):
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2) for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
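A quick sanity check of the encoder and decoder above (names as reconstructed) against the standard library:

import base64

data = b"Hello, World!"
assert base64_encode(data) == base64.b64encode(data)
assert base64_decode(base64_encode(data)) == data
print(base64_encode(data))  # b'SGVsbG8sIFdvcmxkIQ=='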
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
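Worked example for the A1Z26 mapping above (a=1 ... z=26):

assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"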
'''simple docstring'''
def present_value(discount_rate, cash_flows):
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
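The sum above is the standard present-value formula PV = sum(CF_i / (1 + r)**i). A worked check at a 10% discount rate, with an initial outlay of 100 followed by two inflows of 60:

# -100 + 60/1.1 + 60/1.21 = 4.1322..., rounded to two digits
assert present_value(0.1, [-100, 60, 60]) == 4.13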
'''simple docstring'''
from heapq import heappop, heappush

import numpy as np


def dijkstra(grid, source, destination, allow_diagonal):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
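Usage sketch on a small all-passable grid (cells equal to 1 are walkable, per the next_node == 1 check above); the exact path can vary between equal-cost routes:

grid = np.ones((3, 3), dtype=int)
dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
print(dist)  # 4.0: four unit moves without diagonals
print(path)  # e.g. [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)]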
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
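The lazy-module pattern above defers heavy imports until an attribute is first touched. A stripped-down illustration of the same idea with module-level __getattr__ (PEP 562); the mapped modules here are stand-ins, not the real M2M100 submodules:

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Only runs when `name` is not found normally; imports on first use.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(name)
    return getattr(importlib.import_module(module_name), name)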
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
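The length computation above counts non-pad tokens per row with ne(pad).sum(1). A tiny torch sketch with a made-up pad id:

import torch

pad_token_id = 0  # illustrative pad id
input_ids = torch.tensor([[5, 7, 2, 0, 0], [9, 0, 0, 0, 0]])
lengths = input_ids.ne(pad_token_id).sum(1).tolist()
print(lengths)  # [3, 1]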
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
'''simple docstring'''
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.355818,
}


def energy_conversion(from_type, to_type, value):
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
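Two worked conversions using the table above (1 kWh = 3.6e6 J):

assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("joule", "kilojoule", 500) == 0.5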
'''simple docstring''' class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ) -> int: _snake_case = data _snake_case = previous _snake_case = next_node def __str__(self ) -> str: return f"""{self.data}""" def lowercase (self ) -> int: return self.data def lowercase (self ) -> Dict: return self.next def lowercase (self ) -> Union[str, Any]: return self.previous class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase ) -> List[str]: _snake_case = head def __iter__(self ) -> Optional[Any]: return self def lowercase (self ) -> str: if not self.current: raise StopIteration else: _snake_case = self.current.get_data() _snake_case = self.current.get_next() return value class _lowerCAmelCase : '''simple docstring''' def __init__(self ) -> Optional[int]: _snake_case = None # First node in list _snake_case = None # Last node in list def __str__(self ) -> Optional[int]: _snake_case = self.head _snake_case = [] while current is not None: nodes.append(current.get_data() ) _snake_case = current.get_next() return " ".join(str(UpperCAmelCase ) for node in nodes ) def __contains__(self , UpperCAmelCase ) -> int: _snake_case = self.head while current: if current.get_data() == value: return True _snake_case = current.get_next() return False def __iter__(self ) -> Union[str, Any]: return LinkedListIterator(self.head ) def lowercase (self ) -> str: if self.head: return self.head.get_data() return None def lowercase (self ) -> List[Any]: if self.tail: return self.tail.get_data() return None def lowercase (self , UpperCAmelCase ) -> None: if self.head is None: _snake_case = node _snake_case = node else: self.insert_before_node(self.head , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> None: if self.head is None: self.set_head(UpperCAmelCase ) else: self.insert_after_node(self.tail , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> None: _snake_case = Node(UpperCAmelCase ) if self.head is None: self.set_head(UpperCAmelCase ) else: self.set_tail(UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = node _snake_case = node.previous if node.get_previous() is None: _snake_case = node_to_insert else: _snake_case = node_to_insert _snake_case = node_to_insert def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = node _snake_case = node.next if node.get_next() is None: _snake_case = node_to_insert else: _snake_case = node_to_insert _snake_case = node_to_insert def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None: _snake_case = 1 _snake_case = Node(UpperCAmelCase ) _snake_case = self.head while node: if current_position == position: self.insert_before_node(UpperCAmelCase , UpperCAmelCase ) return current_position += 1 _snake_case = node.next self.insert_after_node(self.tail , UpperCAmelCase ) def lowercase (self , UpperCAmelCase ) -> Node: _snake_case = self.head while node: if node.get_data() == item: return node _snake_case = node.get_next() raise Exception("""Node not found""" ) def lowercase (self , UpperCAmelCase ) -> Optional[int]: if (node := self.get_node(UpperCAmelCase )) is not None: if node == self.head: _snake_case = self.head.get_next() if node == self.tail: _snake_case = self.tail.get_previous() self.remove_node_pointers(UpperCAmelCase ) @staticmethod def lowercase (UpperCAmelCase ) -> None: if node.get_next(): _snake_case = node.previous if node.get_previous(): _snake_case = node.next _snake_case = None 
_snake_case = None def lowercase (self ) -> Dict: return self.head is None def __SCREAMING_SNAKE_CASE ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
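A minimal self-contained sketch of the same doubly linked list idea, with illustrative names of my own (Node, DoublyLinkedList, append are not the file's identifiers):

class Node:
    def __init__(self, data):
        self.data, self.prev, self.next = data, None, None


class DoublyLinkedList:
    def __init__(self):
        self.head = self.tail = None

    def append(self, data):
        node = Node(data)
        if self.tail is None:
            self.head = self.tail = node
        else:
            node.prev, self.tail.next = self.tail, node
            self.tail = node

    def __iter__(self):
        current = self.head
        while current:
            yield current.data
            current = current.next


dll = DoublyLinkedList()
for value in (1, 2, 3):
    dll.append(value)
print(list(dll))  # [1, 2, 3]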
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> Any: _snake_case = """ZinengTang/tvlt-base""" _snake_case = tempfile.mkdtemp() def lowercase (self , **UpperCAmelCase ) -> str: return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCAmelCase ) def lowercase (self , **UpperCAmelCase ) -> List[Any]: return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase ) def lowercase (self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def lowercase (self ) -> Any: _snake_case = self.get_image_processor() _snake_case = self.get_feature_extractor() _snake_case = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _snake_case = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def lowercase (self ) -> str: _snake_case = self.get_image_processor() _snake_case = self.get_feature_extractor() _snake_case = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase ) _snake_case = np.ones([12000] ) _snake_case = feature_extractor(UpperCAmelCase , return_tensors="""np""" ) _snake_case = processor(audio=UpperCAmelCase , return_tensors="""np""" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase (self ) -> int: _snake_case = self.get_image_processor() _snake_case = self.get_feature_extractor() _snake_case = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase ) _snake_case = np.ones([3, 224, 224] ) _snake_case = image_processor(UpperCAmelCase , return_tensors="""np""" ) _snake_case = processor(images=UpperCAmelCase , return_tensors="""np""" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase (self ) -> Union[str, Any]: _snake_case = self.get_image_processor() _snake_case = self.get_feature_extractor() _snake_case = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase ) _snake_case = np.ones([12000] ) _snake_case = np.ones([3, 224, 224] ) _snake_case = processor(audio=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def lowercase (self ) -> str: _snake_case = self.get_image_processor() _snake_case = self.get_feature_extractor() _snake_case = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput __lowerCAmelCase = 8 def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): _snake_case = x.device _snake_case = (x * 255).int().clamp(0 , 255 ) _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c h w -> b c 1 h w""" ) _snake_case = ((x & mask) != 0).float() _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b c d h w -> b (c d) h w""" ) _snake_case = bits * 2 - 1 return bits def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): _snake_case = x.device _snake_case = (x > 0).int() _snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """d -> d 1 1""" ) _snake_case = rearrange(_SCREAMING_SNAKE_CASE , """b (c d) h w -> b c d h w""" , d=8 ) _snake_case = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" ) return (dec / 255).clamp(0.0 , 1.0 ) def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): if self.num_inference_steps is None: raise ValueError( """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _snake_case = self.alphas_cumprod[timestep] _snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _snake_case = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _snake_case = self.bit_scale if self.config.clip_sample: _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _snake_case = model_output.device if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else """cpu""" _snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) _snake_case = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ** 0.5 * eta * noise _snake_case = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="epsilon" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): _snake_case = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _snake_case, _snake_case = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 ) else: _snake_case = None # 1. compute alphas, betas _snake_case = self.alphas_cumprod[t] _snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one _snake_case = 1 - alpha_prod_t _snake_case = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _snake_case = model_output else: raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" _snake_case = self.bit_scale if self.config.clip_sample: _snake_case = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _snake_case = 0 if t > 0: _snake_case = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_SCREAMING_SNAKE_CASE ).to(model_output.device ) _snake_case = (self._get_variance(_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise _snake_case = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , ) -> Tuple: super().__init__() _snake_case = bit_scale _snake_case = ( ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step ) self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__(self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]: _snake_case = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , ) _snake_case = decimal_to_bits(UpperCAmelCase ) * self.bit_scale _snake_case = latents.to(self.device ) self.scheduler.set_timesteps(UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _snake_case = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _snake_case = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample _snake_case = bits_to_decimal(UpperCAmelCase ) if output_type == "pil": _snake_case = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
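The decimal-to-bits encoding above turns each 8-bit intensity into ±1-valued bit planes and back. A small torch sketch of that round trip for a single value (shapes and names are mine):

import torch

bits = 8
value = torch.tensor(200)                            # a single 8-bit intensity
mask = 2 ** torch.arange(bits - 1, -1, -1)           # [128, 64, 32, 16, 8, 4, 2, 1]
bit_planes = ((value & mask) != 0).float() * 2 - 1   # in {-1, +1}, like the encoder above
decoded = ((bit_planes > 0).int() * mask).sum()      # threshold at 0, recombine the planes
print(bit_planes)  # tensor([ 1.,  1., -1., -1.,  1., -1., -1., -1.])
print(decoded)     # tensor(200)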
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = [] embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", f"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", f"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", f"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", f"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = [] attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", 
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( 
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = [] token.append((f"""cvt.encoder.stages.{idx}.cls_token""", """stage2.cls_token""") ) return token def __SCREAMING_SNAKE_CASE ( ): _snake_case = [] head.append(("""layernorm.weight""", """norm.weight""") ) head.append(("""layernorm.bias""", """norm.bias""") ) head.append(("""classifier.weight""", """head.weight""") ) head.append(("""classifier.bias""", """head.bias""") ) return head def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = """imagenet-1k-id2label.json""" _snake_case = 1000 _snake_case = """huggingface/label-files""" _snake_case = num_labels _snake_case = json.load(open(cached_download(hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) ) , """r""" ) ) _snake_case = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} _snake_case = _snake_case = CvtConfig(num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13": _snake_case = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21": _snake_case = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: _snake_case = [2, 2, 20] _snake_case = [3, 12, 16] _snake_case = [192, 768, 1024] _snake_case = CvtForImageClassification(_SCREAMING_SNAKE_CASE ) _snake_case = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" ) _snake_case = image_size _snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device("""cpu""" ) ) _snake_case = OrderedDict() _snake_case = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: _snake_case = list_of_state_dict + cls_token(_SCREAMING_SNAKE_CASE ) _snake_case = list_of_state_dict + embeddings(_SCREAMING_SNAKE_CASE ) for cnt in range(config.depth[idx] ): _snake_case = list_of_state_dict + attention(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = list_of_state_dict + final() for gg in list_of_state_dict: print(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): _snake_case = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( 
'--image_size', default=384, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __lowerCAmelCase = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
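The conversion script above reduces to building (new_key, old_key) rename pairs and copying tensors across. A generic sketch of the pattern with made-up keys and shapes:

import torch

original_weights = {"stage0.patch_embed.proj.weight": torch.zeros(64, 3, 7, 7)}
rename_pairs = [
    ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
     "stage0.patch_embed.proj.weight"),
]

new_state_dict = {new: original_weights[old] for new, old in rename_pairs}
# model.load_state_dict(new_state_dict)  # then load into the target model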
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f'{solution() = }')
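The perimeters being summed belong to almost-equilateral triangles with integral area; Heron's formula verifies a small case (the (5, 5, 6) triangle is my example, not produced by the code):

import math

a, b, c = 5, 5, 6                    # sides differ by at most one
s = (a + b + c) / 2                  # semi-perimeter = 8
area = math.sqrt(s * (s - a) * (s - b) * (s - c))
print(area)  # 12.0, an integer area, so perimeter 16 is counted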
'''simple docstring''' import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy __lowerCAmelCase = logging.getLogger(__name__) __lowerCAmelCase = 'pytorch_model.bin' @dataclasses.dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) lowerCAmelCase_ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "A csv or a json file containing the validation data."} ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "The name of the task to train on."} , ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) lowerCAmelCase_ = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) lowerCAmelCase_ = dataclasses.field( default="no" , metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) lowerCAmelCase_ = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) lowerCAmelCase_ = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
} , ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) lowerCAmelCase_ = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) lowerCAmelCase_ = dataclasses.field( default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) lowerCAmelCase_ = dataclasses.field( default=__snake_case , metadata={"help": "Random seed for initialization."} , ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: _snake_case = dataset.filter(lambda _SCREAMING_SNAKE_CASE : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 _snake_case = int(eval_result * len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) _snake_case = dataset.sort("""probability""" , reverse=_SCREAMING_SNAKE_CASE ) _snake_case = dataset.select(range(_SCREAMING_SNAKE_CASE ) ) _snake_case = dataset.remove_columns(["""label""", """probability"""] ) _snake_case = dataset.rename_column("""prediction""" , """label""" ) _snake_case = dataset.map(lambda _SCREAMING_SNAKE_CASE : {"label": idalabel[example["label"]]} ) _snake_case = dataset.shuffle(seed=args.seed ) _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(_SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) else: dataset.to_json(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): _snake_case = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() _snake_case = STModelArguments(model_name_or_path=_SCREAMING_SNAKE_CASE ) _snake_case = STDataArguments(train_file=_SCREAMING_SNAKE_CASE , infer_file=_SCREAMING_SNAKE_CASE ) _snake_case = STTrainingArguments(output_dir=_SCREAMING_SNAKE_CASE ) _snake_case = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(_SCREAMING_SNAKE_CASE ).items(): setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for key, value in kwargs.items(): if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Sanity checks _snake_case = {} _snake_case = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None _snake_case = args.train_file _snake_case = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None _snake_case = args.eval_file for key in data_files: _snake_case = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: _snake_case = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) _snake_case = f"""{args.output_dir}/self-train_iter-{{}}""".format _snake_case = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() _snake_case = None _snake_case = None _snake_case = 0 _snake_case = False # Show the progress bar _snake_case = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): _snake_case = data_dir_format(_SCREAMING_SNAKE_CASE ) assert os.path.exists(_SCREAMING_SNAKE_CASE ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """stage-1""" ) _snake_case = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): arguments_dict.update({key: value} ) _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """best-checkpoint""" , _SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , _SCREAMING_SNAKE_CASE ) finetune(**_SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() assert os.path.exists(_SCREAMING_SNAKE_CASE ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , _SCREAMING_SNAKE_CASE ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """best-checkpoint""" ) _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """stage-2""" ) # Update arguments_dict _snake_case = model_path _snake_case = data_files["""train"""] _snake_case = current_output_dir _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """best-checkpoint""" , _SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): logger.info( """Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , _SCREAMING_SNAKE_CASE ) finetune(**_SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() assert os.path.exists(_SCREAMING_SNAKE_CASE ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , _SCREAMING_SNAKE_CASE ) _snake_case = iteration _snake_case = data_dir_format(iteration + 1 ) _snake_case = AutoConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , """best-checkpoint""" ) ) _snake_case = config.idalabel _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """eval_results_best-checkpoint.json""" ) _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """test_results_best-checkpoint.json""" ) assert os.path.exists(_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , """r""" ) as f: _snake_case = float(json.load(_SCREAMING_SNAKE_CASE )[args.eval_metric] ) _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , """infer_output_best-checkpoint.csv""" ) assert os.path.exists(_SCREAMING_SNAKE_CASE ) # Loading the dataset from local csv or json files. _snake_case = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""] _snake_case = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(_SCREAMING_SNAKE_CASE ): shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() _snake_case = os.path.join(_SCREAMING_SNAKE_CASE , f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: _snake_case = eval_result if best_iteration is None: _snake_case = new_iteration _snake_case = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: _snake_case = new_iteration _snake_case = new_eval_result _snake_case = 0 else: if new_eval_result == best_eval_result: _snake_case = new_iteration _snake_case = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: _snake_case = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , _SCREAMING_SNAKE_CASE ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_SCREAMING_SNAKE_CASE , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(_SCREAMING_SNAKE_CASE , """eval_results_best-iteration.json""" ) , ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_SCREAMING_SNAKE_CASE , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , 
os.path.join(_SCREAMING_SNAKE_CASE , """eval_results_best-iteration.json""" ) , )
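The pseudo-label filtering above is a datasets filter on the probability column. A minimal sketch with toy rows (assuming the datasets library is installed):

from datasets import Dataset

rows = {"prediction": [0, 1, 1], "probability": [0.55, 0.92, 0.97]}
dataset = Dataset.from_dict(rows)
confident = dataset.filter(lambda example: example["probability"] > 0.9)
print(len(confident))  # 2 rows survive the 0.9 threshold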
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = "deberta-v2" def __init__(self , UpperCAmelCase=128100 , UpperCAmelCase=1536 , UpperCAmelCase=24 , UpperCAmelCase=24 , UpperCAmelCase=6144 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=0 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-7 , UpperCAmelCase=False , UpperCAmelCase=-1 , UpperCAmelCase=0 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=0 , UpperCAmelCase="gelu" , **UpperCAmelCase , ) -> List[str]: super().__init__(**UpperCAmelCase ) _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = relative_attention _snake_case = max_relative_positions _snake_case = pad_token_id _snake_case = position_biased_input # Backwards compatibility if type(UpperCAmelCase ) == str: _snake_case = [x.strip() for x in pos_att_type.lower().split("""|""" )] _snake_case = pos_att_type _snake_case = vocab_size _snake_case = layer_norm_eps _snake_case = kwargs.get("""pooler_hidden_size""" , UpperCAmelCase ) _snake_case = pooler_dropout _snake_case = pooler_hidden_act class _lowerCAmelCase ( __snake_case ): '''simple docstring''' @property def lowercase (self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def lowercase (self ) -> int: return 12 def lowercase (self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 3 , UpperCAmelCase = 40 , UpperCAmelCase = 40 , UpperCAmelCase = None , ) -> Mapping[str, Any]: _snake_case = super().generate_dummy_inputs(preprocessor=UpperCAmelCase , framework=UpperCAmelCase ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Load configuration defined in the metadata file with open(_SCREAMING_SNAKE_CASE ) as metadata_file: _snake_case = json.load(_SCREAMING_SNAKE_CASE ) _snake_case = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata["""model_config"""] ) # Load in the weights from the checkpoint_path _snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""module"""] # Load the entity vocab file _snake_case = load_original_entity_vocab(_SCREAMING_SNAKE_CASE ) # add an entry for [MASK2] _snake_case = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _snake_case = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] ) # Add special tokens to the token vocabulary for downstream tasks _snake_case = AddedToken("""<ent>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) _snake_case = AddedToken("""<ent2>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , """tokenizer_config.json""" ) , """r""" ) as f: _snake_case = json.load(_SCREAMING_SNAKE_CASE ) _snake_case = """MLukeTokenizer""" with open(os.path.join(_SCREAMING_SNAKE_CASE , """tokenizer_config.json""" ) , """w""" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens _snake_case = tokenizer.convert_tokens_to_ids(["""@"""] )[0] _snake_case = tokenizer.convert_tokens_to_ids(["""#"""] )[0] _snake_case = state_dict["""embeddings.word_embeddings.weight"""] _snake_case = word_emb[ent_init_index].unsqueeze(0 ) _snake_case = word_emb[enta_init_index].unsqueeze(0 ) _snake_case = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _snake_case = state_dict[bias_name] _snake_case = decoder_bias[ent_init_index].unsqueeze(0 ) _snake_case = decoder_bias[enta_init_index].unsqueeze(0 ) _snake_case = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _snake_case = f"""encoder.layer.{layer_index}.attention.self.""" _snake_case = state_dict[prefix + matrix_name] _snake_case = state_dict[prefix + matrix_name] _snake_case = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _snake_case = state_dict["""entity_embeddings.entity_embeddings.weight"""] _snake_case = 
entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 ) _snake_case = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _snake_case = state_dict["""entity_predictions.bias"""] _snake_case = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 ) _snake_case = torch.cat([entity_prediction_bias, entity_mask_bias] ) _snake_case = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval() state_dict.pop("""entity_predictions.decoder.weight""" ) state_dict.pop("""lm_head.decoder.weight""" ) state_dict.pop("""lm_head.decoder.bias""" ) _snake_case = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )): _snake_case = state_dict[key] else: _snake_case = state_dict[key] _snake_case, _snake_case = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}: raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(_SCREAMING_SNAKE_CASE ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _snake_case = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task="""entity_classification""" ) _snake_case = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).""" _snake_case = (0, 9) _snake_case = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="""pt""" ) _snake_case = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _snake_case = torch.Size((1, 33, 768) ) _snake_case = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _snake_case = torch.Size((1, 1, 768) ) _snake_case = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _snake_case = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _snake_case = """Tokyo is the capital of <mask>.""" _snake_case = (24, 30) _snake_case = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="""pt""" ) _snake_case = model(**_SCREAMING_SNAKE_CASE ) _snake_case = encoding["""input_ids"""][0].tolist() _snake_case = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) ) _snake_case = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE ) _snake_case = 
outputs.entity_logits[0][0].argmax().item() _snake_case = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("""Saving PyTorch model to {}""".format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = ["""[MASK]""", """[PAD]""", """[UNK]"""] _snake_case = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )] _snake_case = {} for entry in data: _snake_case = entry["""id"""] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _snake_case = entity_id break _snake_case = f"""{language}:{entity_name}""" _snake_case = entity_id return new_mapping if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) __lowerCAmelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
341
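The conversion script above grows the word and entity embedding matrices by concatenating freshly built rows for the new special tokens onto the existing weights. A minimal sketch of that pattern with toy tensors (shapes are illustrative, not the real LUKE dimensions):

import torch

vocab_size, hidden = 10, 4
word_emb = torch.randn(vocab_size, hidden)

# Initialize the two new rows ("<ent>", "<ent2>") from existing token rows,
# mirroring how the script copies the "@" and "#" embeddings.
ent_row = word_emb[3].unsqueeze(0)
ent2_row = word_emb[5].unsqueeze(0)

word_emb = torch.cat([word_emb, ent_row, ent2_row])
assert word_emb.shape == (vocab_size + 2, hidden)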
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral string to an integer.

    >>> roman_to_int("III")
    3
    >>> roman_to_int("CLIV")
    154
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one (e.g. "IV") is subtractive.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral string.

    >>> int_to_roman(154)
    'CLIV'
    """
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
341
1
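Assuming the names restored above (`roman_to_int`, `int_to_roman`), a quick round-trip sanity check:

assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert roman_to_int(int_to_roman(3549)) == 3549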
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):
    # Max heap: sift the element at `index` down into place.
    largest = index
    left_index = 2 * index + 1  # Left child
    right_index = 2 * index + 2  # Right child
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """
    Sort the array in place and return it.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    >>> sort([5])
    [5]
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
341
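A short demonstration of the hybrid behavior, assuming the cleaned-up `sort` entry point above: small ranges fall through to insertion sort, while recursions that exceed the depth budget fall back to heap sort, keeping the worst case O(n log n).

import random

data = [random.randint(-100, 100) for _ in range(200)]
expected = sorted(data)
assert sort(data) == expected

# Tiny inputs never reach the quicksort stage at all.
assert sort([3.0, 1.0, 2.0]) == [1.0, 2.0, 3.0]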
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
341
1
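The module above defers heavy submodule imports until an attribute is first accessed. A minimal standalone sketch of the same idea using `__getattr__` on a namespace object (this is an illustration of the pattern, not transformers' actual `_LazyModule`):

import importlib


class LazyNamespace:
    """Resolve exported names to objects in submodules on first access."""

    def __init__(self, package, import_structure):
        self._package = package
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            name: module
            for module, names in import_structure.items()
            for name in names
        }

    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        module = importlib.import_module(f"{self._package}.{module_name}")
        value = getattr(module, name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value


# Example: accessing `ns.JSONDecoder` triggers `import json.decoder` lazily.
ns = LazyNamespace("json", {"decoder": ["JSONDecoder"]})
print(ns.JSONDecoder)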
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
341
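Each `*_command_parser` call above follows the same argparse pattern: register a subparser, then bind a handler with `set_defaults(func=...)` so the dispatcher can call `args.func(args)`. A minimal self-contained sketch (command and option names are mine):

from argparse import ArgumentParser


def greet_command_parser(subparsers):
    parser = subparsers.add_parser("greet", help="Print a greeting.")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello, {args.name}"))


parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
greet_command_parser(subparsers)

args = parser.parse_args(["greet", "--name", "accelerate"])
args.func(args)  # -> hello, accelerate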
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __lowerCAmelCase = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ): if attention_mask is None: _snake_case = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _snake_case = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _snake_case = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _snake_case = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=99 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=0.02 , ) -> Union[str, Any]: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = eos_token_id _snake_case = pad_token_id _snake_case = bos_token_id _snake_case = initializer_range def lowercase (self ) -> str: _snake_case = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _snake_case = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 ) _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase , ) _snake_case = prepare_blenderbot_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def lowercase (self ) -> Dict: _snake_case, _snake_case = self.prepare_config_and_inputs() return config, inputs_dict def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: _snake_case = 20 _snake_case = model_class_name(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] ) _snake_case, _snake_case = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) _snake_case = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) _snake_case = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _snake_case = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , ) _snake_case = model.decode(UpperCAmelCase , UpperCAmelCase ) _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: _snake_case = 20 _snake_case = model_class_name(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] ) _snake_case, _snake_case = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) _snake_case = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _snake_case = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) _snake_case = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _snake_case = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) _snake_case = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) _snake_case = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase ) _snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = 99 def lowercase (self ) -> Any: _snake_case = np.array( [ 
[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) _snake_case = input_ids.shape[0] _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowercase (self ) -> Optional[Any]: _snake_case, _snake_case, _snake_case = self._get_config_and_data() _snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) _snake_case = lm_model(input_ids=UpperCAmelCase ) _snake_case = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase ) def lowercase (self ) -> int: _snake_case = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) _snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase ) _snake_case = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) _snake_case = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) _snake_case = lm_model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase ) _snake_case = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase ) def lowercase (self ) -> Tuple: _snake_case = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) _snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 ) _snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() _snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(UpperCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _lowerCAmelCase ( __snake_case , unittest.TestCase , __snake_case ): '''simple docstring''' lowerCAmelCase_ = True lowerCAmelCase_ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowercase (self ) -> Any: _snake_case = FlaxBlenderbotModelTester(self ) def lowercase (self ) -> str: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Dict: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) _snake_case = 
model_class(UpperCAmelCase ) @jax.jit def encode_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase ) with self.subTest("""JIT Enabled""" ): _snake_case = encode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _snake_case = encode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase (self ) -> str: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _snake_case = model_class(UpperCAmelCase ) _snake_case = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) _snake_case = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): return model.decode( decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , ) with self.subTest("""JIT Enabled""" ): _snake_case = decode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): _snake_case = decode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase (self ) -> Any: for model_class_name in self.all_model_classes: _snake_case = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _snake_case = np.ones((1, 1) ) * model.config.eos_token_id _snake_case = model(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" ) @slow def lowercase (self ) -> Dict: _snake_case = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25} _snake_case = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} _snake_case = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=UpperCAmelCase ) _snake_case = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" ) _snake_case = ["""Sam"""] _snake_case = tokenizer(UpperCAmelCase , return_tensors="""jax""" ) _snake_case = model.generate(**UpperCAmelCase , **UpperCAmelCase ) _snake_case = """Sam is a great name. It means \"sun\" in Gaelic.""" _snake_case = tokenizer.batch_decode(UpperCAmelCase , **UpperCAmelCase ) assert generated_txt[0].strip() == tgt_text
341
1
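The tests above rely on `shift_tokens_right` to build decoder inputs from label ids. A minimal numpy sketch of what that op does, under my reading of the standard seq2seq convention (the real Flax version uses jnp and also remaps masked -100 labels to the pad token, which is included here):

import numpy as np


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift label ids one position right to form decoder inputs."""
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # Replace any masked label positions (-100) with the pad token.
    return np.where(shifted == -100, pad_token_id, shifted)


labels = np.array([[5, 6, 7, 2], [8, 9, 2, -100]])
print(shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=0))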
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :] def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="attention" ): _snake_case = _snake_case = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] ) _snake_case = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) _snake_case = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] ) _snake_case = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) _snake_case = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] ) _snake_case = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) _snake_case = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] ) _snake_case = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): if split_mlp_wi: _snake_case = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :] _snake_case = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :] _snake_case = (wi_a, wi_a) else: _snake_case = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :] _snake_case = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :] return wi, wo def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i] def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , *, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ): _snake_case = traverse_util.flatten_dict(variables["""target"""] ) _snake_case = {"""/""".join(_SCREAMING_SNAKE_CASE ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _snake_case = """encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""" , _SCREAMING_SNAKE_CASE ) _snake_case = collections.OrderedDict() # Shared embeddings. _snake_case = old["""token_embedder/embedding"""] # Encoder. for i in range(_SCREAMING_SNAKE_CASE ): # Block i, layer 0 (Self Attention). _snake_case = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """encoder""" , """pre_attention_layer_norm""" ) _snake_case, _snake_case, _snake_case, _snake_case = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """encoder""" , """attention""" ) _snake_case = layer_norm _snake_case = k.T _snake_case = o.T _snake_case = q.T _snake_case = v.T # Block i, layer 1 (MLP). 
_snake_case = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """encoder""" , """pre_mlp_layer_norm""" ) _snake_case, _snake_case = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """encoder""" , _SCREAMING_SNAKE_CASE ) _snake_case = layer_norm if split_mlp_wi: _snake_case = wi[0].T _snake_case = wi[1].T else: _snake_case = wi.T _snake_case = wo.T if scalable_attention: # convert the rel_embedding of each layer _snake_case = tax_relpos_bias_lookup( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """encoder""" ).T _snake_case = old["""encoder/encoder_norm/scale"""] if not scalable_attention: _snake_case = tax_relpos_bias_lookup( _SCREAMING_SNAKE_CASE , 0 , """encoder""" ).T _snake_case = tax_relpos_bias_lookup( _SCREAMING_SNAKE_CASE , 0 , """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(_SCREAMING_SNAKE_CASE ): # Block i, layer 0 (Self Attention). _snake_case = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decoder""" , """pre_self_attention_layer_norm""" ) _snake_case, _snake_case, _snake_case, _snake_case = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decoder""" , """self_attention""" ) _snake_case = layer_norm _snake_case = k.T _snake_case = o.T _snake_case = q.T _snake_case = v.T # Block i, layer 1 (Cross Attention). _snake_case = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decoder""" , """pre_cross_attention_layer_norm""" ) _snake_case, _snake_case, _snake_case, _snake_case = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decoder""" , """encoder_decoder_attention""" ) _snake_case = layer_norm _snake_case = k.T _snake_case = o.T _snake_case = q.T _snake_case = v.T # Block i, layer 2 (MLP). _snake_case = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decoder""" , """pre_mlp_layer_norm""" ) _snake_case, _snake_case = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decoder""" , _SCREAMING_SNAKE_CASE ) _snake_case = layer_norm if split_mlp_wi: _snake_case = wi[0].T _snake_case = wi[1].T else: _snake_case = wi.T _snake_case = wo.T if scalable_attention: # convert the rel_embedding of each layer _snake_case = tax_relpos_bias_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """decoder""" ).T _snake_case = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _snake_case = old["""decoder/logits_dense/kernel"""].T return new def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _snake_case = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _snake_case = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print("""Using shared word embeddings as lm_head.""" ) _snake_case = state_dict["""shared.weight"""] return state_dict def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = checkpoints.load_tax_checkpoint(_SCREAMING_SNAKE_CASE ) _snake_case = convert_tax_to_pytorch( _SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=_SCREAMING_SNAKE_CASE , scalable_attention=_SCREAMING_SNAKE_CASE ) _snake_case = make_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , ): _snake_case = MTaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: _snake_case = UMTaEncoderModel(_SCREAMING_SNAKE_CASE ) else: _snake_case = UMTaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tax_weights_in_ta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Verify that we can load the checkpoint. model.from_pretrained(_SCREAMING_SNAKE_CASE ) print("""Done""" ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) __lowerCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
341
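The attention lookup in the conversion script flattens per-head T5X kernels of shape (d_model, n_heads, d_head) into 2-D matrices before transposing them into PyTorch's Linear layout. A toy numpy illustration of that reshape (dimensions are made up for clarity):

import numpy as np

d_model, n_heads, d_head = 8, 2, 4
k = np.arange(d_model * n_heads * d_head, dtype=np.float32).reshape(
    d_model, n_heads, d_head
)

# Merge the head axes: (d_model, n_heads, d_head) -> (d_model, n_heads * d_head)
k2d = np.ascontiguousarray(k).reshape(k.shape[0], k.shape[1] * k.shape[2])
assert k2d.shape == (d_model, n_heads * d_head)

# PyTorch Linear weights are (out_features, in_features), hence the final .T
print(k2d.T.shape)  # (8, 8)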
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=10 , UpperCAmelCase=3 , UpperCAmelCase=32 * 4 , UpperCAmelCase=32 * 6 , UpperCAmelCase=4 , UpperCAmelCase=32 , ) -> Optional[Any]: _snake_case = parent _snake_case = batch_size _snake_case = is_training _snake_case = use_auxiliary_loss _snake_case = num_queries _snake_case = num_channels _snake_case = min_size _snake_case = max_size _snake_case = num_labels _snake_case = mask_feature_size def lowercase (self ) -> str: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( UpperCAmelCase ) _snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase ) _snake_case = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase ) > 0.5 ).float() _snake_case = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase ) > 0.5).long() _snake_case = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase (self ) -> Tuple: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase (self ) -> Optional[Any]: _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.prepare_config_and_inputs() _snake_case = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int: _snake_case = output.encoder_hidden_states _snake_case = output.pixel_decoder_hidden_states _snake_case = output.transformer_decoder_hidden_states self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCAmelCase ) , config.decoder_config.decoder_layers ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]: with torch.no_grad(): _snake_case = MaskFormerModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase ) _snake_case = model(UpperCAmelCase , output_hidden_states=UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of 
the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(UpperCAmelCase , UpperCAmelCase ) def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: _snake_case = MaskFormerForInstanceSegmentation(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() def comm_check_on_output(UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase ) _snake_case = model(UpperCAmelCase ) comm_check_on_output(UpperCAmelCase ) _snake_case = model( pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ) comm_check_on_output(UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowerCAmelCase_ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowercase (self ) -> int: _snake_case = MaskFormerModelTester(self ) _snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase ) def lowercase (self ) -> int: self.config_tester.run_common_tests() def lowercase (self ) -> List[Any]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase ) def lowercase (self ) -> Any: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def lowercase (self ) -> Optional[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def lowercase (self ) -> Optional[int]: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def lowercase (self ) -> int: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def lowercase (self ) -> Optional[int]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers 
using `add_module` which doesn't work well with `nn.DataParallel`""" ) def lowercase (self ) -> Optional[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowercase (self ) -> Tuple: pass def lowercase (self ) -> List[str]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) @slow def lowercase (self ) -> int: for model_name in ["facebook/maskformer-swin-small-coco"]: _snake_case = MaskFormerModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def lowercase (self ) -> Tuple: _snake_case = (self.model_tester.min_size,) * 2 _snake_case = { """pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase ), """class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase ).long(), } _snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase ) _snake_case = model(**UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def lowercase (self ) -> Dict: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(UpperCAmelCase ).to(UpperCAmelCase ) _snake_case = model(**UpperCAmelCase , output_attentions=UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def lowercase (self ) -> Tuple: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _snake_case = self.all_model_classes[1] _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() _snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ).loss loss.backward() def lowercase (self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss _snake_case = self.all_model_classes[1] _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs() _snake_case = True _snake_case = True _snake_case = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() _snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ) _snake_case = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _snake_case = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _snake_case = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _snake_case = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) 
self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __lowerCAmelCase = 1E-4 def __SCREAMING_SNAKE_CASE ( ): _snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase (self ) -> Optional[int]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def lowercase (self ) -> str: _snake_case = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) _snake_case = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) _snake_case = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) _snake_case = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) _snake_case = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def lowercase (self ) -> List[str]: _snake_case = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(UpperCAmelCase ) .eval() ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) _snake_case = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) # masks_queries_logits _snake_case = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _snake_case = [ [-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942], ] _snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) # class_queries_logits _snake_case = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _snake_case = torch.tensor( [ [1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0], [3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0], [1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0], ] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , 
atol=UpperCAmelCase ) ) def lowercase (self ) -> List[Any]: _snake_case = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(UpperCAmelCase ) .eval() ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase ) _snake_case = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) # masks_queries_logits _snake_case = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _snake_case = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] _snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) # class_queries_logits _snake_case = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _snake_case = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) ) def lowercase (self ) -> Tuple: _snake_case = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(UpperCAmelCase ) .eval() ) _snake_case = self.default_image_processor _snake_case = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) _snake_case = inputs["""pixel_values"""].to(UpperCAmelCase ) _snake_case = [el.to(UpperCAmelCase ) for el in inputs["""mask_labels"""]] _snake_case = [el.to(UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): _snake_case = model(**UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
341
1
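The integration tests above compare a small corner of each output tensor against hard-coded reference values with an absolute tolerance. A tiny standalone example of that pattern (values are illustrative):

import torch

TOLERANCE = 1e-4

output = torch.tensor([[0.28520, -0.01591, 0.97353]])
expected = torch.tensor([[0.2852, -0.0159, 0.9735]])

# Passes as long as every element differs by less than TOLERANCE.
assert torch.allclose(output, expected, atol=TOLERANCE)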
'''simple docstring''' import heapq import sys import numpy as np __lowerCAmelCase = tuple[int, int] class _lowerCAmelCase : '''simple docstring''' def __init__(self ) -> Dict: _snake_case = [] _snake_case = set() def lowercase (self ) -> Dict: if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def lowercase (self ) -> Any: return len(self.elements ) == 0 def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Any: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(UpperCAmelCase ) else: # update # print("update", item) _snake_case = [] ((_snake_case), (_snake_case)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((_snake_case), (_snake_case)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def lowercase (self , UpperCAmelCase ) -> str: if item in self.set: self.set.remove(UpperCAmelCase ) _snake_case = [] ((_snake_case), (_snake_case)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((_snake_case), (_snake_case)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def lowercase (self ) -> Any: return self.elements[0][1] def lowercase (self ) -> str: ((_snake_case), (_snake_case)) = heapq.heappop(self.elements ) self.set.remove(UpperCAmelCase ) return (priority, item) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # euclidean distance _snake_case = np.array(_SCREAMING_SNAKE_CASE ) _snake_case = np.array(_SCREAMING_SNAKE_CASE ) return np.linalg.norm(a - b ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # integer division by time variable return consistent_heuristic(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) // t def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = g_function[start] + Wa * heuristics[i](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return ans def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = np.chararray((n, n) ) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): _snake_case = """*""" for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): if (j, (n - 1) - i) in blocks: _snake_case = """#""" _snake_case = """-""" _snake_case = back_pointer[goal] while x != start: ((_snake_case), (_snake_case)) = x # print(x) _snake_case = """-""" _snake_case = back_pointer[x] _snake_case = """-""" for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): if (i, j) == (0, n - 1): print(grid[i][j] , end=""" """ ) print("""<-- End position""" , end=""" """ ) else: print(grid[i][j] , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) _snake_case = back_pointer[goal] while x != start: print(_SCREAMING_SNAKE_CASE , end=""" """ ) _snake_case = back_pointer[x] print(_SCREAMING_SNAKE_CASE ) sys.exit() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def 
expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    # Pop `s` from every open list, then relax each of its four neighbours.
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1),
    (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1),
    (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]

blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start, goal, n_heuristic):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
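
# Illustrative sketch (an assumption, not the file's own class): the open lists
# above only rely on a small priority-queue interface -- put(), minkey(),
# top_show() and remove_element(). A minimal heap-backed version could look like
# this; the real implementation lives earlier in this module.
import heapq


class SketchPriorityQueue:
    """Minimal priority queue exposing the interface multi_a_star expects."""

    def __init__(self):
        self.elements = []  # binary heap of (priority, item) pairs
        self.set = set()  # membership index for O(1) "already queued" checks

    def minkey(self):
        # Smallest key currently queued, or infinity when empty.
        return self.elements[0][0] if self.elements else float("inf")

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)

    def top_show(self):
        # Peek at the best item without removing it.
        return self.elements[0][1]

    def remove_element(self, item):
        if item in self.set:
            self.set.discard(item)
            self.elements = [(p, x) for (p, x) in self.elements if x != item]
            heapq.heapify(self.elements)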
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
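
# A hedged usage sketch (not part of the test file): the same benchmark classes
# can be driven directly outside of unittest; the model id and sizes below are
# only illustrative.
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(PyTorchBenchmark(args).run())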
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes `data` into Base64."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes Base64-encoded `encoded_data` back into bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
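
# A hedged usage sketch (not in the original file): round-trip check against the
# standard library's base64 module on a sample payload.
if __name__ == "__main__":
    import base64 as stdlib_base64

    sample = b"Algorithms are fun!"
    encoded = base64_encode(sample)
    assert encoded == stdlib_base64.b64encode(sample)  # same encoding as stdlib
    assert base64_decode(encoded) == sample  # decoding restores the original bytes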
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sorts a list of numbers by distributing them into value-range buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
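
# A hedged usage sketch (not in the original file): spot-check the result against
# the built-in sorted() on random integer input.
if __name__ == "__main__":
    import random

    data = [random.randint(-50, 50) for _ in range(100)]
    assert bucket_sort(data) == sorted(data)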
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
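
# A hedged usage sketch (an assumption, not part of the original module): in user
# code the tokenizer is normally imported from the top-level package rather than
# from this file.
if __name__ == "__main__":
    from transformers import XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    print(tokenizer.tokenize("Hello, how are you?"))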
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ",
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
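
# A hedged usage sketch (an assumption, not part of the original module): the
# pipeline is normally built through the top-level `pipeline` factory; the model
# name below is only illustrative.
if __name__ == "__main__":
    from transformers import pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
    conversation = Conversation("Going to the movies tonight - any suggestions?")
    print(chatbot(conversation))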
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'

_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'

_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'


def get_coref_infos(
    key_lines,
    sys_lines,
    NP_only=False,
    remove_nested=False,
    keep_singletons=True,
    min_span=False,
    doc="dummy_doc",
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin of an angle in degrees using its Taylor series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
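
# A hedged usage sketch (not in the original file): compare the Taylor
# approximation with math.sin for a few angles.
if __name__ == "__main__":
    from math import sin as math_sin

    for deg in (0, 30, 45, 90, 180, 270):
        approx = sin(deg)
        exact = round(math_sin(radians(deg)), 10)
        print(f"sin({deg} deg) = {approx}  (math.sin: {exact})")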