code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowerCAmelCase__ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv']) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[str]: inspect_dataset(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : str = path + '.py' assert script_name in os.listdir(lowerCamelCase_) assert "__pycache__" not in os.listdir(lowerCamelCase_) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning') @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning') @pytest.mark.parametrize('path' , ['accuracy']) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]: inspect_metric(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : int = path + '.py' assert script_name in os.listdir(lowerCamelCase_) assert "__pycache__" not in os.listdir(lowerCamelCase_) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]: UpperCamelCase__ : Optional[Any] = get_dataset_config_info(lowerCamelCase_ , config_name=lowerCamelCase_) assert info.config_name == config_name assert list(info.splits.keys()) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]: with pytest.raises(lowerCamelCase_): get_dataset_config_info(lowerCamelCase_ , config_name=lowerCamelCase_) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : Any = get_dataset_config_names(lowerCamelCase_) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : Union[str, Any] = get_dataset_infos(lowerCamelCase_) assert list(infos.keys()) == expected_configs UpperCamelCase__ : Tuple = expected_configs[0] assert expected_config in infos UpperCamelCase__ : List[Any] = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys()) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : str = get_dataset_infos(lowerCamelCase_) assert expected_config in infos UpperCamelCase__ : Union[str, Any] = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys()) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: with pytest.raises(lowerCamelCase_): get_dataset_split_names(lowerCamelCase_ , config_name=lowerCamelCase_)
709
'''simple docstring''' import numpy as np from PIL import Image def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : str = 0 # compute the shape of the output matrix UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape)) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size]) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[int] = 0 return updated_arr def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : Tuple = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : List[str] = 0 UpperCamelCase__ : List[Any] = 0 # compute the shape of the output matrix UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape)) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size])) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : Union[str, Any] = 0 UpperCamelCase__ : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='avgpooling', verbose=True) # Loading the image lowerCAmelCase__ = Image.open('path_to_image') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
6
0
def __UpperCAmelCase ( lowerCamelCase_) -> int: UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : Tuple = set({'(', '[', '{'}) UpperCamelCase__ : Dict = set({')', ']', '}'}) UpperCamelCase__ : Optional[Any] = {'{': '}', '[': ']', '(': ')'} for i in range(len(lowerCamelCase_)): if s[i] in open_brackets: stack.append(s[i]) elif s[i] in closed_brackets and ( len(lowerCamelCase_) == 0 or (len(lowerCamelCase_) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(lowerCamelCase_) == 0 def __UpperCAmelCase ( ) -> Optional[int]: UpperCamelCase__ : List[str] = input('Enter sequence of brackets: ') if is_balanced(lowerCamelCase_): print(lowerCamelCase_ , 'is balanced') else: print(lowerCamelCase_ , 'is not balanced') if __name__ == "__main__": main()
710
'''simple docstring''' from __future__ import annotations class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]): UpperCamelCase__ : int = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.') if len(UpperCAmelCase_) != 0: UpperCamelCase__ : str = len(rows[0]) if cols == 0: raise error for row in rows: if len(UpperCAmelCase_) != cols: raise error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise error UpperCamelCase__ : Optional[int] = rows else: UpperCamelCase__ : Optional[Any] = [] def __UpperCamelCase ( self : Union[str, Any]): return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] @property def __UpperCamelCase ( self : Dict): return len(self.rows) @property def __UpperCamelCase ( self : Tuple): return len(self.rows[0]) @property def __UpperCamelCase ( self : List[Any]): return (self.num_rows, self.num_columns) @property def __UpperCamelCase ( self : Any): return self.order[0] == self.order[1] def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[int] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] for row_num in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : Dict): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0]) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns)) def __UpperCamelCase ( self : str): return bool(self.determinant()) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int): UpperCamelCase__ : Optional[Any] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns) if other_column != column ] for other_row in range(self.num_rows) if other_row != row ] return Matrix(UpperCAmelCase_).determinant() def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int): if (row + column) % 2 == 0: return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : List[Any]): return Matrix( [ [self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)] for row in range(self.num_rows) ]) def __UpperCamelCase ( self : Optional[int]): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns) ] for row in range(self.minors().num_rows) ]) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = [ [self.cofactors().rows[column][row] for column in range(self.num_columns)] for row in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse') return self.adjugate() * (1 / determinant) def __repr__( self : Any): return str(self.rows) def __str__( self : List[Any]): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0])) + "]]" return ( "[" + "\n ".join( [ '[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]' for row in self.rows ]) + "]" ) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix') if position is None: self.rows.append(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:] def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : int = TypeError( 'Column must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in column: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix') if position is None: UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)] else: UpperCamelCase__ : str = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows) ] def __eq__( self : List[Any] , UpperCAmelCase_ : object): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): return NotImplemented return self.rows == other.rows def __ne__( self : Any , UpperCAmelCase_ : object): return not self == other def __neg__( self : Union[str, Any]): return self * -1 def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Addition requires matrices of the same order') return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __sub__( self : Tuple , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order') return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float): if isinstance(UpperCAmelCase_ , (int, float)): return Matrix( [[int(element * other) for element in row] for row in self.rows]) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second') return Matrix( [ [Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()] for row in self.rows ]) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix') def __pow__( self : Dict , UpperCAmelCase_ : int): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise TypeError('A Matrix can only be raised to the power of an int') if not self.is_square: raise ValueError('Only square matrices can be raised to a power') if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power') UpperCamelCase__ : str = self for _ in range(other - 1): result *= self return result @classmethod def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]): return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_))) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]: UpperCamelCase__ : List[Any] = AutoConfig.from_pretrained(lowerCamelCase_) UpperCamelCase__ : Tuple = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_) UpperCamelCase__ : Union[str, Any] = checkpoints.load_tax_checkpoint(lowerCamelCase_) UpperCamelCase__ : str = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": UpperCamelCase__ : Union[str, Any] = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": UpperCamelCase__ : Dict = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase__ : Dict = 'TransientGlobalSelfAttention' else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].') # Encoder for layer_index in range(config.num_layers): UpperCamelCase__ : int = f'layers_{str(lowerCamelCase_)}' # Self-Attention UpperCamelCase__ : Any = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] UpperCamelCase__ : Optional[int] = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] UpperCamelCase__ : Optional[int] = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] UpperCamelCase__ : Optional[Any] = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase__ : Optional[int] = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization UpperCamelCase__ : int = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: UpperCamelCase__ : int = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] UpperCamelCase__ : Dict = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: UpperCamelCase__ : Union[str, Any] = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] UpperCamelCase__ : List[str] = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization UpperCamelCase__ : List[Any] = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning UpperCamelCase__ : List[Any] = flax_model.params['encoder']['block'][str(lowerCamelCase_)]['layer'] UpperCamelCase__ : Tuple = tax_attention_key UpperCamelCase__ : Optional[Any] = tax_attention_out UpperCamelCase__ : Tuple = tax_attention_query UpperCamelCase__ : Union[str, Any] = tax_attention_value UpperCamelCase__ : str = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase__ : Union[str, Any] = tax_global_layer_norm if split_mlp_wi: UpperCamelCase__ : int = tax_mlp_wi_a UpperCamelCase__ : int = tax_mlp_wi_a else: UpperCamelCase__ : str = tax_mlp_wi UpperCamelCase__ : Tuple = tax_mlp_wo UpperCamelCase__ : Union[str, Any] = tax_mlp_layer_norm UpperCamelCase__ : Any = flax_model_encoder_layer_block # Only for layer 0: UpperCamelCase__ : str = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T UpperCamelCase__ : Tuple = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCamelCase__ : Union[str, Any] = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T UpperCamelCase__ : Optional[int] = tax_encoder_global_rel_embedding # Assigning UpperCamelCase__ : Dict = tax_model['target']['encoder']['encoder_norm']['scale'] UpperCamelCase__ : str = tax_encoder_norm # Decoder for layer_index in range(config.num_layers): UpperCamelCase__ : List[Any] = f'layers_{str(lowerCamelCase_)}' # Self-Attention UpperCamelCase__ : int = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] UpperCamelCase__ : Optional[int] = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] UpperCamelCase__ : Any = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] UpperCamelCase__ : Tuple = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization UpperCamelCase__ : Optional[Any] = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention UpperCamelCase__ : Optional[Any] = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] UpperCamelCase__ : List[Any] = tax_enc_dec_attention_module['key']['kernel'] UpperCamelCase__ : str = tax_enc_dec_attention_module['out']['kernel'] UpperCamelCase__ : List[Any] = tax_enc_dec_attention_module['query']['kernel'] UpperCamelCase__ : Union[str, Any] = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization UpperCamelCase__ : List[Any] = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: UpperCamelCase__ : Union[str, Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] UpperCamelCase__ : int = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: UpperCamelCase__ : List[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] UpperCamelCase__ : str = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization UpperCamelCase__ : Any = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning UpperCamelCase__ : List[str] = flax_model.params['decoder']['block'][str(lowerCamelCase_)]['layer'] UpperCamelCase__ : List[Any] = tax_attention_key UpperCamelCase__ : int = tax_attention_out UpperCamelCase__ : Tuple = tax_attention_query UpperCamelCase__ : Dict = tax_attention_value UpperCamelCase__ : int = tax_pre_attention_layer_norm UpperCamelCase__ : Optional[int] = tax_enc_dec_attention_key UpperCamelCase__ : List[Any] = tax_enc_dec_attention_out UpperCamelCase__ : int = tax_enc_dec_attention_query UpperCamelCase__ : Dict = tax_enc_dec_attention_value UpperCamelCase__ : Tuple = tax_cross_layer_norm if split_mlp_wi: UpperCamelCase__ : int = tax_mlp_wi_a UpperCamelCase__ : Dict = tax_mlp_wi_a else: UpperCamelCase__ : int = tax_mlp_wi UpperCamelCase__ : Any = tax_mlp_wo UpperCamelCase__ : Any = txa_mlp_layer_norm UpperCamelCase__ : Tuple = flax_model_decoder_layer_block # Decoder Normalization UpperCamelCase__ : Optional[int] = tax_model['target']['decoder']['decoder_norm']['scale'] UpperCamelCase__ : Optional[int] = txa_decoder_norm # Only for layer 0: UpperCamelCase__ : Any = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T UpperCamelCase__ : List[Any] = tax_decoder_rel_embedding # Token Embeddings UpperCamelCase__ : Dict = tax_model['target']['token_embedder']['embedding'] UpperCamelCase__ : int = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: UpperCamelCase__ : Union[str, Any] = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(lowerCamelCase_) print('T5X Model was sucessfully converted!') if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) lowerCAmelCase__ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
711
'''simple docstring''' import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowercase : def __UpperCamelCase ( self : Union[str, Any]): torch.manual_seed(0) UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : List[str] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Dict): torch.manual_seed(0) UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : str = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[str] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Any): UpperCamelCase__ : Dict = self.get_dummy_components() UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = inputs['prompt'] UpperCamelCase__ : List[Any] = inputs['generator'] UpperCamelCase__ : Tuple = inputs['num_inference_steps'] UpperCamelCase__ : List[Any] = inputs['output_type'] if "image" in inputs: UpperCamelCase__ : Tuple = inputs['image'] else: UpperCamelCase__ : Union[str, Any] = None if "mask_image" in inputs: UpperCamelCase__ : Optional[int] = inputs['mask_image'] else: UpperCamelCase__ : int = None if "original_image" in inputs: UpperCamelCase__ : List[Any] = inputs['original_image'] else: UpperCamelCase__ : Optional[Any] = None UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_) # inputs with prompt converted to embeddings UpperCamelCase__ : List[Any] = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Dict = image if mask_image is not None: UpperCamelCase__ : Optional[int] = mask_image if original_image is not None: UpperCamelCase__ : Union[str, Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , ) UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = inputs['generator'] UpperCamelCase__ : List[Any] = inputs['num_inference_steps'] UpperCamelCase__ : Optional[int] = inputs['output_type'] # inputs with prompt converted to embeddings UpperCamelCase__ : Any = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Tuple = image if mask_image is not None: UpperCamelCase__ : Union[str, Any] = mask_image if original_image is not None: UpperCamelCase__ : str = original_image UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4) def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Any = self.get_dummy_components() UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4)
6
0
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase (__lowerCamelCase ): def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : int=99 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Union[str, Any]="last" , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int=None , ): UpperCamelCase__ : int = parent UpperCamelCase__ : Any = batch_size UpperCamelCase__ : Tuple = seq_length UpperCamelCase__ : Tuple = is_training UpperCamelCase__ : Union[str, Any] = use_input_lengths UpperCamelCase__ : Any = use_token_type_ids UpperCamelCase__ : List[Any] = use_labels UpperCamelCase__ : Optional[Any] = gelu_activation UpperCamelCase__ : Union[str, Any] = sinusoidal_embeddings UpperCamelCase__ : Union[str, Any] = causal UpperCamelCase__ : Optional[Any] = asm UpperCamelCase__ : Union[str, Any] = n_langs UpperCamelCase__ : str = vocab_size UpperCamelCase__ : int = n_special UpperCamelCase__ : Dict = hidden_size UpperCamelCase__ : List[str] = num_hidden_layers UpperCamelCase__ : Tuple = num_attention_heads UpperCamelCase__ : str = hidden_dropout_prob UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob UpperCamelCase__ : int = max_position_embeddings UpperCamelCase__ : Optional[Any] = type_vocab_size UpperCamelCase__ : Optional[Any] = type_sequence_label_size UpperCamelCase__ : Dict = initializer_range UpperCamelCase__ : int = num_labels UpperCamelCase__ : Optional[Any] = num_choices UpperCamelCase__ : Dict = summary_type UpperCamelCase__ : int = use_proj UpperCamelCase__ : Optional[int] = scope def __UpperCamelCase ( self : Dict): UpperCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCamelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length]) UpperCamelCase__ : Any = None if self.use_input_lengths: UpperCamelCase__ : Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) UpperCamelCase__ : str = None UpperCamelCase__ : List[str] = None UpperCamelCase__ : int = None if self.use_labels: UpperCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) UpperCamelCase__ : List[str] = ids_tensor([self.batch_size] , 2).float() UpperCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices) UpperCamelCase__ : Tuple = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __UpperCamelCase ( self : Union[str, Any]): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , ): UpperCamelCase__ : str = FlaubertModel(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = model(UpperCAmelCase_ , langs=UpperCAmelCase_) UpperCamelCase__ : Any = model(UpperCAmelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __UpperCamelCase ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , ): UpperCamelCase__ : List[Any] = FlaubertWithLMHeadModel(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , ): UpperCamelCase__ : List[Any] = FlaubertForQuestionAnsweringSimple(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : List[Any] = model(UpperCAmelCase_) UpperCamelCase__ : List[str] = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , ): UpperCamelCase__ : str = FlaubertForQuestionAnswering(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = model( UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , ) UpperCamelCase__ : Optional[int] = model( UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , ) (UpperCamelCase__ ) : Tuple = result_with_labels.to_tuple() UpperCamelCase__ : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_) (UpperCamelCase__ ) : int = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , ()) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,)) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , ): UpperCamelCase__ : str = FlaubertForSequenceClassification(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Tuple = model(UpperCAmelCase_) UpperCamelCase__ : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , ): UpperCamelCase__ : List[Any] = self.num_labels UpperCamelCase__ : Optional[Any] = FlaubertForTokenClassification(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , ): UpperCamelCase__ : Optional[Any] = self.num_choices UpperCamelCase__ : List[Any] = FlaubertForMultipleChoice(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : List[str] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCamelCase__ : List[str] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCamelCase__ : Tuple = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() UpperCamelCase__ : List[str] = model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def __UpperCamelCase ( self : Union[str, Any]): UpperCamelCase__ : List[Any] = self.prepare_config_and_inputs() ( UpperCamelCase__ ) : str = config_and_inputs UpperCamelCase__ : Optional[Any] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): _lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=False): UpperCamelCase__ : str = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": UpperCamelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_) return inputs_dict def __UpperCamelCase ( self : Union[str, Any]): UpperCamelCase__ : str = FlaubertModelTester(self) UpperCamelCase__ : int = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=37) def __UpperCamelCase ( self : Optional[Any]): self.config_tester.run_common_tests() def __UpperCamelCase ( self : str): UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*UpperCAmelCase_) def __UpperCamelCase ( self : Any): UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any]): UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCAmelCase_) @slow def __UpperCamelCase ( self : Union[str, Any]): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ : int = FlaubertModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) @slow @require_torch_gpu def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return UpperCamelCase__ : str = True UpperCamelCase__ : List[Any] = model_class(config=UpperCAmelCase_) UpperCamelCase__ : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : Optional[int] = torch.jit.trace( UpperCAmelCase_ , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , 'traced_model.pt')) UpperCamelCase__ : List[str] = torch.jit.load(os.path.join(UpperCAmelCase_ , 'traced_model.pt') , map_location=UpperCAmelCase_) loaded(inputs_dict['input_ids'].to(UpperCAmelCase_) , inputs_dict['attention_mask'].to(UpperCAmelCase_)) @require_torch class __lowercase (unittest.TestCase ): @slow def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : str = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased') UpperCamelCase__ : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]]) with torch.no_grad(): UpperCamelCase__ : str = model(UpperCAmelCase_)[0] UpperCamelCase__ : Optional[int] = torch.Size((1, 11, 768)) self.assertEqual(output.shape , UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4))
712
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCAmelCase__ = 3 def __UpperCAmelCase ( lowerCamelCase_) -> int: print('Generating primitive root of p') while True: UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_) if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1: continue if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1: continue return g def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print('Generating prime p...') UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number. UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p. UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety. UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_) UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p) UpperCamelCase__ : Optional[Any] = (key_size, d) return public_key, private_key def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None: if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'): print('\nWARNING:') print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' 'Use a different name or delete these files and re-run this program.') sys.exit() UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_) print(f'\nWriting public key to file {name}_pubkey.txt...') with open(f'{name}_pubkey.txt' , 'w') as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}') print(f'Writing private key to file {name}_privkey.txt...') with open(f'{name}_privkey.txt' , 'w') as fo: fo.write(f'{private_key[0]},{private_key[1]}') def __UpperCAmelCase ( ) -> None: print('Making key files...') make_key_files('elgamal' , 2_048) print('Key files generation successful') if __name__ == "__main__": main()
6
0
'''simple docstring''' def __UpperCAmelCase ( ) -> list[list[int]]: return [list(range(1_000 - i , -1_000 - i , -1)) for i in range(1_000)] lowerCAmelCase__ = generate_large_matrix() lowerCAmelCase__ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __UpperCAmelCase ( lowerCamelCase_) -> None: assert all(row == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for row in grid) assert all(list(lowerCamelCase_) == sorted(lowerCamelCase_ , reverse=lowerCamelCase_) for col in zip(*lowerCamelCase_)) def __UpperCAmelCase ( lowerCamelCase_) -> int: UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : List[str] = len(lowerCamelCase_) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: UpperCamelCase__ : int = (left + right) // 2 UpperCamelCase__ : int = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: UpperCamelCase__ : Union[str, Any] = mid + 1 else: UpperCamelCase__ : int = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_) -> int: UpperCamelCase__ : Dict = 0 UpperCamelCase__ : Tuple = len(grid[0]) for i in range(len(lowerCamelCase_)): UpperCamelCase__ : Dict = find_negative_index(grid[i][:bound]) total += bound return (len(lowerCamelCase_) * len(grid[0])) - total def __UpperCAmelCase ( lowerCamelCase_) -> int: return len([number for row in grid for number in row if number < 0]) def __UpperCAmelCase ( lowerCamelCase_) -> int: UpperCamelCase__ : List[str] = 0 for row in grid: for i, number in enumerate(lowerCamelCase_): if number < 0: total += len(lowerCamelCase_) - i break return total def __UpperCAmelCase ( ) -> None: from timeit import timeit print('Running benchmarks') UpperCamelCase__ : Optional[int] = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): UpperCamelCase__ : Any = timeit(f'{func}(grid=grid)' , setup=lowerCamelCase_ , number=500) print(f'{func}() took {time:0.4f} seconds') if __name__ == "__main__": import doctest doctest.testmod() benchmark()
713
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase__ = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str: for attribute in key.split('.'): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models UpperCamelCase__ : str = 'lm_head' UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_) if weight_type is not None: UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape else: UpperCamelCase__ : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": UpperCamelCase__ : Optional[Any] = value elif weight_type == "weight_g": UpperCamelCase__ : Union[str, Any] = value elif weight_type == "weight_v": UpperCamelCase__ : List[Any] = value elif weight_type == "bias": UpperCamelCase__ : Any = value else: UpperCamelCase__ : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]: UpperCamelCase__ : List[Any] = [] UpperCamelCase__ : int = fairseq_model.state_dict() UpperCamelCase__ : int = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase__ : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase__ : List[Any] = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]: UpperCamelCase__ : Any = True if "*" in mapped_key: UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2] UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_) if "weight_g" in name: UpperCamelCase__ : int = 'weight_g' elif "weight_v" in name: UpperCamelCase__ : Any = 'weight_v' elif "bias" in name: UpperCamelCase__ : Union[str, Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase__ : Any = 'weight' else: UpperCamelCase__ : Tuple = None set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) continue if not is_used: unused_weights.append(lowerCamelCase_) logger.warning(f'Unused weights: {unused_weights}') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1] UpperCamelCase__ : List[Any] = name.split('.') UpperCamelCase__ : Any = int(items[0]) UpperCamelCase__ : int = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) UpperCamelCase__ : Tuple = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) UpperCamelCase__ : int = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) UpperCamelCase__ : Optional[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) UpperCamelCase__ : List[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') else: unused_weights.append(lowerCamelCase_) @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple: if config_path is not None: UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_) else: UpperCamelCase__ : int = UniSpeechConfig() if is_finetuned: if dict_path: UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase__ : List[Any] = target_dict.pad_index UpperCamelCase__ : Dict = target_dict.bos_index UpperCamelCase__ : Union[str, Any] = target_dict.eos_index UpperCamelCase__ : Tuple = len(target_dict.symbols) UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json') if not os.path.isdir(lowerCamelCase_): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_)) return os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_) UpperCamelCase__ : Optional[int] = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase__ : Any = 42 UpperCamelCase__ : List[str] = 43 with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle: json.dump(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer( lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , ) UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_) processor.save_pretrained(lowerCamelCase_) UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_) else: UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_) if is_finetuned: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path}) else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) UpperCamelCase__ : int = model[0].eval() recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) hf_unispeech.save_pretrained(lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase__ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
6
0
'''simple docstring''' from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” lowerCAmelCase__ = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py lowerCAmelCase__ = 0 lowerCAmelCase__ = 0xE_0_0_0 lowerCAmelCase__ = 0xE_0_0_1 lowerCAmelCase__ = 0xE_0_0_2 lowerCAmelCase__ = 0xE_0_0_3 lowerCAmelCase__ = 0xE_0_0_4 # Maps special codepoints to human-readable names. lowerCAmelCase__ = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: '[CLS]', SEP: '[SEP]', BOS: '[BOS]', MASK: '[MASK]', PAD: '[PAD]', RESERVED: '[RESERVED]', } # Maps special codepoint human-readable names to their codepoint values. lowerCAmelCase__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class __lowercase (__lowerCamelCase ): _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , UpperCAmelCase_ : Union[str, Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : int=chr(UpperCAmelCase_) , UpperCAmelCase_ : int=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : Union[str, Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : List[Any]=chr(UpperCAmelCase_) , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=2_048 , **UpperCAmelCase_ : Optional[int] , ): UpperCamelCase__ : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token UpperCamelCase__ : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase__ : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , model_max_length=UpperCAmelCase_ , **UpperCAmelCase_ , ) # Creates a mapping for looking up the IDs of special symbols. UpperCamelCase__ : Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): UpperCamelCase__ : List[Any] = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. UpperCamelCase__ : Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } UpperCamelCase__ : Optional[int] = UNICODE_VOCAB_SIZE UpperCamelCase__ : Union[str, Any] = len(self._special_codepoints) @property def __UpperCamelCase ( self : Dict): return self._unicode_vocab_size def __UpperCamelCase ( self : str , UpperCAmelCase_ : str): return list(UpperCAmelCase_) def __UpperCamelCase ( self : str , UpperCAmelCase_ : str): try: return ord(UpperCAmelCase_) except TypeError: raise ValueError(F'invalid token: \'{token}\'') def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCAmelCase_) except TypeError: raise ValueError(F'invalid id: {index}') def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[Any]): return "".join(UpperCAmelCase_) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : List[str] = [self.sep_token_id] UpperCamelCase__ : Union[str, Any] = [self.cls_token_id] UpperCamelCase__ : Dict = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def __UpperCamelCase ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) UpperCamelCase__ : Optional[int] = [1] + ([0] * len(UpperCAmelCase_)) + [1] if token_ids_a is not None: result += ([0] * len(UpperCAmelCase_)) + [1] return result def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : Tuple = [self.sep_token_id] UpperCamelCase__ : str = [self.cls_token_id] UpperCamelCase__ : Tuple = len(cls + token_ids_a + sep) * [0] if token_ids_a is not None: result += len(token_ids_a + sep) * [1] return result def __UpperCamelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): return ()
714
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[str]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Union[str, Any] = 1 UpperCamelCase__ : Union[str, Any] = 3 UpperCamelCase__ : Dict = (32, 32) UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_) return image @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def __UpperCamelCase ( self : str): torch.manual_seed(0) UpperCamelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[Any]): def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict): class __lowercase : def __init__( self : List[Any]): UpperCamelCase__ : Optional[Any] = torch.ones([0]) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int): self.pixel_values.to(UpperCAmelCase_) return self return Out() return extract def __UpperCamelCase ( self : str): UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : Any = self.dummy_cond_unet UpperCamelCase__ : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , ) UpperCamelCase__ : List[str] = self.dummy_vae UpperCamelCase__ : str = self.dummy_text_encoder UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Tuple = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : int = self.dummy_cond_unet UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Optional[int] = self.dummy_vae UpperCamelCase__ : Optional[int] = self.dummy_text_encoder UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Dict = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) assert isinstance(pipe.scheduler , UpperCAmelCase_) assert pipe.safety_checker is None UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU') def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Dict = self.dummy_cond_unet UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Any = self.dummy_vae UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # put models in fp16 UpperCamelCase__ : Any = unet.half() UpperCamelCase__ : Tuple = vae.half() UpperCamelCase__ : Optional[int] = bert.half() # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[int] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger' UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) UpperCamelCase__ : Any = 4_003_660_346 UpperCamelCase__ : Any = 7 # without safety guidance (sld_guidance_scale = 0) UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : str = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # without safety guidance (strong configuration) UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Dict = output.images UpperCamelCase__ : str = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity' UpperCamelCase__ : Tuple = 2_734_971_755 UpperCamelCase__ : Tuple = 7 UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : int = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : int = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5') UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : int = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) UpperCamelCase__ : Any = 1_044_355_234 UpperCamelCase__ : Optional[int] = 12 UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7 UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : List[str] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
6
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
715
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowerCAmelCase__ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowerCAmelCase__ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __UpperCAmelCase ( ) -> Union[str, Any]: UpperCamelCase__ : Optional[Any] = ( list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1)) ) UpperCamelCase__ : List[Any] = bs[:] UpperCamelCase__ : Optional[int] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase_) cs.append(2**8 + n) n += 1 UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs] return dict(zip(lowerCamelCase_ , lowerCamelCase_)) def __UpperCAmelCase ( lowerCamelCase_) -> Tuple: UpperCamelCase__ : Any = set() UpperCamelCase__ : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char)) UpperCamelCase__ : str = char return pairs class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ): UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token super().__init__( errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , ) with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle: UpperCamelCase__ : Any = json.load(UpperCAmelCase_) UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()} UpperCamelCase__ : Any = errors # how to handle errors in decoding UpperCamelCase__ : Tuple = bytes_to_unicode() UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle: UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1] UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges] UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Dict = {} UpperCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def __UpperCamelCase ( self : Tuple): return len(self.encoder) def __UpperCamelCase ( self : Tuple): return dict(self.encoder , **self.added_tokens_encoder) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]): if token in self.cache: return self.cache[token] UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_) UpperCamelCase__ : int = get_pairs(UpperCAmelCase_) if not pairs: return token while True: UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf'))) if bigram not in self.bpe_ranks: break UpperCamelCase__, UpperCamelCase__ : Tuple = bigram UpperCamelCase__ : Dict = [] UpperCamelCase__ : Optional[int] = 0 while i < len(UpperCAmelCase_): try: UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) UpperCamelCase__ : Any = j if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_) UpperCamelCase__ : Dict = new_word if len(UpperCAmelCase_) == 1: break else: UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : List[Any] = word return word def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any): UpperCamelCase__ : Optional[Any] = [] for token in re.findall(self.pat , UpperCAmelCase_): UpperCamelCase__ : Optional[int] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' ')) return bpe_tokens def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token)) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]): return self.decoder.get(UpperCAmelCase_) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int): UpperCamelCase__ : int = ''.join(UpperCAmelCase_) UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): if not os.path.isdir(UpperCAmelCase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return UpperCamelCase__ : str = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n') UpperCamelCase__ : str = 0 with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!') UpperCamelCase__ : List[Any] = token_index writer.write(' '.join(UpperCAmelCase_) + '\n') index += 1 return vocab_file, merge_file def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_)) + [1] return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1] def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : Any = [self.sep_token_id] UpperCamelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]): UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()): UpperCamelCase__ : str = ' ' + text return (text, kwargs) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): return token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"): UpperCamelCase__ : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text) else: # Generated responses should contain them already. inputs.append(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : int = self.encode(UpperCAmelCase_) if len(UpperCAmelCase_) > self.model_max_length: UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :] logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.') return input_ids
6
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''table-transformer''' _lowerCamelCase = ['''past_key_values'''] _lowerCamelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : int , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : List[str]=100 , UpperCAmelCase_ : Tuple=6 , UpperCAmelCase_ : List[str]=2_048 , UpperCAmelCase_ : str=8 , UpperCAmelCase_ : Optional[int]=6 , UpperCAmelCase_ : Tuple=2_048 , UpperCAmelCase_ : Optional[int]=8 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : str="relu" , UpperCAmelCase_ : Dict=256 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Dict=1.0 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : int="sine" , UpperCAmelCase_ : Dict="resnet50" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=False , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Tuple=0.1 , **UpperCAmelCase_ : Tuple , ): if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.') if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') UpperCamelCase__ : Tuple = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): UpperCamelCase__ : Optional[Any] = backbone_config.get('model_type') UpperCamelCase__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] UpperCamelCase__ : Union[str, Any] = config_class.from_dict(UpperCAmelCase_) # set timm attributes to None UpperCamelCase__ : Union[str, Any] = None, None, None UpperCamelCase__ : Any = use_timm_backbone UpperCamelCase__ : str = backbone_config UpperCamelCase__ : Dict = num_channels UpperCamelCase__ : Optional[Any] = num_queries UpperCamelCase__ : List[str] = d_model UpperCamelCase__ : List[Any] = encoder_ffn_dim UpperCamelCase__ : str = encoder_layers UpperCamelCase__ : Dict = encoder_attention_heads UpperCamelCase__ : str = decoder_ffn_dim UpperCamelCase__ : Any = decoder_layers UpperCamelCase__ : Optional[Any] = decoder_attention_heads UpperCamelCase__ : Optional[int] = dropout UpperCamelCase__ : Tuple = attention_dropout UpperCamelCase__ : Tuple = activation_dropout UpperCamelCase__ : Dict = activation_function UpperCamelCase__ : int = init_std UpperCamelCase__ : int = init_xavier_std UpperCamelCase__ : Optional[int] = encoder_layerdrop UpperCamelCase__ : List[Any] = decoder_layerdrop UpperCamelCase__ : Any = encoder_layers UpperCamelCase__ : Union[str, Any] = auxiliary_loss UpperCamelCase__ : Optional[int] = position_embedding_type UpperCamelCase__ : Any = backbone UpperCamelCase__ : Optional[int] = use_pretrained_backbone UpperCamelCase__ : Tuple = dilation # Hungarian matcher UpperCamelCase__ : List[Any] = class_cost UpperCamelCase__ : Any = bbox_cost UpperCamelCase__ : str = giou_cost # Loss coefficients UpperCamelCase__ : List[Any] = mask_loss_coefficient UpperCamelCase__ : Optional[int] = dice_loss_coefficient UpperCamelCase__ : str = bbox_loss_coefficient UpperCamelCase__ : Union[str, Any] = giou_loss_coefficient UpperCamelCase__ : Any = eos_coefficient super().__init__(is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_) @property def __UpperCamelCase ( self : Any): return self.encoder_attention_heads @property def __UpperCamelCase ( self : List[Any]): return self.d_model class __lowercase (__lowerCamelCase ): _lowerCamelCase = version.parse('''1.11''' ) @property def __UpperCamelCase ( self : List[Any]): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ]) @property def __UpperCamelCase ( self : Dict): return 1e-5 @property def __UpperCamelCase ( self : List[Any]): return 12
716
'''simple docstring''' import requests from bsa import BeautifulSoup def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str: UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser') UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_).find('span').text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
6
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class __lowercase (__lowerCamelCase ): def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple): warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.' , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
717
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __lowercase (unittest.TestCase ): @slow def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') model.to(UpperCAmelCase_) from datasets import load_dataset UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo') UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB') UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_) UpperCamelCase__ : Tuple = outputs.logits UpperCamelCase__ : str = torch.Size((1, 16)) self.assertEqual(logits.shape , UpperCAmelCase_) UpperCamelCase__ : Tuple = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> str: if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive') UpperCamelCase__ : Tuple = str(bin(lowerCamelCase_))[2:] # remove the leading "0b" UpperCamelCase__ : str = str(bin(lowerCamelCase_))[2:] UpperCamelCase__ : List[str] = max(len(lowerCamelCase_) , len(lowerCamelCase_)) return "0b" + "".join( str(int('1' in (char_a, char_b))) for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_) , b_binary.zfill(lowerCamelCase_))) if __name__ == "__main__": import doctest doctest.testmod()
718
'''simple docstring''' import argparse import struct import unittest class __lowercase : def __init__( self : Tuple , UpperCAmelCase_ : bytes): UpperCamelCase__ : Dict = data # Initialize hash values UpperCamelCase__ : Any = [ 0X6A_09E_667, 0XBB_67A_E85, 0X3C_6EF_372, 0XA5_4FF_53A, 0X51_0E5_27F, 0X9B_056_88C, 0X1F_83D_9AB, 0X5B_E0C_D19, ] # Initialize round constants UpperCamelCase__ : List[Any] = [ 0X42_8A2_F98, 0X71_374_491, 0XB5_C0F_BCF, 0XE9_B5D_BA5, 0X39_56C_25B, 0X59_F11_1F1, 0X92_3F8_2A4, 0XAB_1C5_ED5, 0XD8_07A_A98, 0X12_835_B01, 0X24_318_5BE, 0X55_0C7_DC3, 0X72_BE5_D74, 0X80_DEB_1FE, 0X9B_DC0_6A7, 0XC1_9BF_174, 0XE4_9B6_9C1, 0XEF_BE4_786, 0X0F_C19_DC6, 0X24_0CA_1CC, 0X2D_E92_C6F, 0X4A_748_4AA, 0X5C_B0A_9DC, 0X76_F98_8DA, 0X98_3E5_152, 0XA8_31C_66D, 0XB0_032_7C8, 0XBF_597_FC7, 0XC6_E00_BF3, 0XD5_A79_147, 0X06_CA6_351, 0X14_292_967, 0X27_B70_A85, 0X2E_1B2_138, 0X4D_2C6_DFC, 0X53_380_D13, 0X65_0A7_354, 0X76_6A0_ABB, 0X81_C2C_92E, 0X92_722_C85, 0XA2_BFE_8A1, 0XA8_1A6_64B, 0XC2_4B8_B70, 0XC7_6C5_1A3, 0XD1_92E_819, 0XD6_990_624, 0XF4_0E3_585, 0X10_6AA_070, 0X19_A4C_116, 0X1E_376_C08, 0X27_487_74C, 0X34_B0B_CB5, 0X39_1C0_CB3, 0X4E_D8A_A4A, 0X5B_9CC_A4F, 0X68_2E6_FF3, 0X74_8F8_2EE, 0X78_A56_36F, 0X84_C87_814, 0X8C_C70_208, 0X90_BEF_FFA, 0XA4_506_CEB, 0XBE_F9A_3F7, 0XC6_717_8F2, ] UpperCamelCase__ : Tuple = self.preprocessing(self.data) self.final_hash() @staticmethod def __UpperCamelCase ( UpperCAmelCase_ : bytes): UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64)) UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8)) return data + padding + big_endian_integer def __UpperCamelCase ( self : Union[str, Any]): # Convert into blocks of 64 bytes UpperCamelCase__ : int = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data) , 64) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_)) # add 48 0-ed integers words += [0] * 48 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes for index in range(0 , 64): if index > 15: # modify the zero-ed indexes at the end of the array UpperCamelCase__ : Dict = ( self.ror(words[index - 15] , 7) ^ self.ror(words[index - 15] , 18) ^ (words[index - 15] >> 3) ) UpperCamelCase__ : Tuple = ( self.ror(words[index - 2] , 17) ^ self.ror(words[index - 2] , 19) ^ (words[index - 2] >> 10) ) UpperCamelCase__ : int = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25) UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g) UpperCamelCase__ : List[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22) UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c) UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h] # Modify final values UpperCamelCase__ : Optional[Any] = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes) ] UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes]) def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int): return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations) class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : int): import hashlib UpperCamelCase__ : str = bytes('Test String' , 'utf-8') self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest()) def __UpperCAmelCase ( ) -> None: import doctest doctest.testmod() UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file') UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : str = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb') as f: UpperCamelCase__ : Any = f.read() else: UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8') print(SHAaaa(lowerCamelCase_).hash) if __name__ == "__main__": main()
6
0
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowercase : def __init__( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : str=1_000 , UpperCAmelCase_ : Any=[3, 3, 6, 4] , UpperCAmelCase_ : Tuple=[48, 56, 112, 220] , ): UpperCamelCase__ : Optional[Any] = parent UpperCamelCase__ : Tuple = batch_size UpperCamelCase__ : Union[str, Any] = num_channels UpperCamelCase__ : List[str] = is_training UpperCamelCase__ : Optional[Any] = use_labels UpperCamelCase__ : Tuple = hidden_dropout_prob UpperCamelCase__ : Any = attention_probs_dropout_prob UpperCamelCase__ : Optional[Any] = num_labels UpperCamelCase__ : Tuple = image_size UpperCamelCase__ : str = layer_depths UpperCamelCase__ : str = embed_dims def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCamelCase__ : str = None if self.use_labels: UpperCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_labels) UpperCamelCase__ : Optional[int] = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : List[Any]): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase_ , layer_scale_init_value=1e-5 , ) def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]): UpperCamelCase__ : Dict = SwiftFormerModel(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Dict = model(UpperCAmelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7)) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]): UpperCamelCase__ : Dict = self.num_labels UpperCamelCase__ : Union[str, Any] = SwiftFormerForImageClassification(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : str = model(UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) UpperCamelCase__ : Union[str, Any] = SwiftFormerForImageClassification(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCamelCase__ : List[str] = model(UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __UpperCamelCase ( self : List[str]): (UpperCamelCase__) : Tuple = self.prepare_config_and_inputs() UpperCamelCase__ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): _lowerCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _lowerCamelCase = ( {'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification} if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def __UpperCamelCase ( self : Any): UpperCamelCase__ : Union[str, Any] = SwiftFormerModelTester(self) UpperCamelCase__ : Optional[int] = ConfigTester( self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def __UpperCamelCase ( self : Optional[int]): self.config_tester.run_common_tests() @unittest.skip(reason='SwiftFormer does not use inputs_embeds') def __UpperCamelCase ( self : int): pass def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Dict = model_class(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear)) def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_) UpperCamelCase__ : List[str] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase__ : Optional[int] = [*signature.parameters.keys()] UpperCamelCase__ : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any]): UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_) def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_) @slow def __UpperCamelCase ( self : List[Any]): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ : Dict = SwiftFormerModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) @unittest.skip(reason='SwiftFormer does not output attentions') def __UpperCamelCase ( self : Optional[Any]): pass def __UpperCamelCase ( self : Union[str, Any]): def check_hidden_states_output(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict): UpperCamelCase__ : Optional[int] = model_class(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() with torch.no_grad(): UpperCamelCase__ : Any = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) UpperCamelCase__ : List[Any] = outputs.hidden_states UpperCamelCase__ : Any = 8 self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(UpperCAmelCase_)): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ]) , ) UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : str = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase__ : Optional[Any] = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any]): def _config_zero_init(UpperCAmelCase_ : Any): UpperCamelCase__ : Optional[Any] = copy.deepcopy(UpperCAmelCase_) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(UpperCAmelCase_ , UpperCAmelCase_ , 1e-10) if isinstance(getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) , UpperCAmelCase_): UpperCamelCase__ : Optional[int] = _config_zero_init(getattr(UpperCAmelCase_ , UpperCAmelCase_)) setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) return configs_no_init UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ : Dict = _config_zero_init(UpperCAmelCase_) for model_class in self.all_model_classes: UpperCamelCase__ : List[str] = model_class(config=UpperCAmelCase_) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def __UpperCamelCase ( self : List[str]): pass def __UpperCAmelCase ( ) -> Dict: UpperCamelCase__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_torch @require_vision class __lowercase (unittest.TestCase ): @cached_property def __UpperCamelCase ( self : Optional[Any]): return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None @slow def __UpperCamelCase ( self : List[str]): UpperCamelCase__ : Union[str, Any] = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(UpperCAmelCase_) UpperCamelCase__ : Any = self.default_image_processor UpperCamelCase__ : Tuple = prepare_img() UpperCamelCase__ : Dict = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_) # verify the logits UpperCamelCase__ : Dict = torch.Size((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase_) UpperCamelCase__ : Tuple = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]]).to(UpperCAmelCase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
719
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants lowerCAmelCase__ = 300 # TEMPERATURE (unit = K) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float: if donor_conc <= 0: raise ValueError('Donor concentration should be positive') elif acceptor_conc <= 0: raise ValueError('Acceptor concentration should be positive') elif intrinsic_conc <= 0: raise ValueError('Intrinsic concentration should be positive') elif donor_conc <= intrinsic_conc: raise ValueError( 'Donor concentration should be greater than intrinsic concentration') elif acceptor_conc <= intrinsic_conc: raise ValueError( 'Acceptor concentration should be greater than intrinsic concentration') else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=__lowerCamelCase ) class __lowercase (__lowerCamelCase ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization _lowerCamelCase = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) _lowerCamelCase = Features({'''text''': Value('''string''' )} ) _lowerCamelCase = Features({'''labels''': ClassLabel} ) _lowerCamelCase = '''text''' _lowerCamelCase = '''labels''' def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]): if self.label_column not in features: raise ValueError(F'Column {self.label_column} is not present in features.') if not isinstance(features[self.label_column] , UpperCAmelCase_): raise ValueError(F'Column {self.label_column} is not a ClassLabel.') UpperCamelCase__ : Dict = copy.deepcopy(self) UpperCamelCase__ : int = self.label_schema.copy() UpperCamelCase__ : Union[str, Any] = features[self.label_column] UpperCamelCase__ : Tuple = label_schema return task_template @property def __UpperCamelCase ( self : Optional[int]): return { self.text_column: "text", self.label_column: "labels", }
720
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]: UpperCamelCase__ : int = [] if isinstance(lowerCamelCase_ , lowerCamelCase_): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , (list, tuple)): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , torch.Tensor): shapes.append(tree.shape) else: raise ValueError('Not supported') return shapes @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]: UpperCamelCase__ : int = [] for d in reversed(lowerCamelCase_): idx.append(flat_idx % d) UpperCamelCase__ : Any = flat_idx // d return tuple(reversed(lowerCamelCase_)) @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]: # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(lowerCamelCase_) -> None: UpperCamelCase__ : Tuple = True for i in range(len(lowerCamelCase_)): UpperCamelCase__ : List[Any] = -1 * (i + 1) l[reversed_idx] &= tally UpperCamelCase__ : Optional[Any] = l[reversed_idx] if start_edges is None: UpperCamelCase__ : int = [s == 0 for s in start] reduce_edge_list(lowerCamelCase_) if end_edges is None: UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)] reduce_edge_list(lowerCamelCase_) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase_) == 0: return [()] elif len(lowerCamelCase_) == 1: return [(slice(start[0] , end[0] + 1),)] UpperCamelCase__ : List[Tuple[slice, ...]] = [] UpperCamelCase__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase_ , lowerCamelCase_): if s == e: path_list.append(slice(lowerCamelCase_ , s + 1)) else: break UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_) UpperCamelCase__ : Dict = len(lowerCamelCase_) # start == end, and we're done if divergence_idx == len(lowerCamelCase_): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : str = start[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , sdi + 1),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , )) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : Optional[int] = end[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , edi + 1),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , )) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),)) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),)) slices.extend(lower()) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper()) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),)) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper()) UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),)) slices.extend(lower()) return slices @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor: UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims] UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_)) # _get_minimal_slice_set is inclusive UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_)) # Get an ordered list of slices to perform UpperCamelCase__ : int = _get_minimal_slice_set( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) UpperCamelCase__ : List[Any] = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any: if not (len(lowerCamelCase_) > 0): raise ValueError('Must provide at least one input') UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)] UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)]) def _prep_inputs(lowerCamelCase_) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims]) == no_batch_dims: UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:]) else: UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) return t UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_) UpperCamelCase__ : int = None if _out is not None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out) UpperCamelCase__ : Dict = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase_) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[Any] = prepped_outputs for _ in range(lowerCamelCase_): # Chunk the input if not low_mem: UpperCamelCase__ : str = _select_chunk else: UpperCamelCase__ : List[Any] = partial( _chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , ) UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_) # Run the layer on the chunk UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_) # Allocate space for the output if out is None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_) # Put the chunk in its pre-allocated space if isinstance(lowerCamelCase_ , lowerCamelCase_): def assign(lowerCamelCase_ , lowerCamelCase_) -> None: for k, v in da.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_): assign(lowerCamelCase_ , da[k]) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCamelCase__ : List[str] = da[k] assign(lowerCamelCase_ , lowerCamelCase_) elif isinstance(lowerCamelCase_ , lowerCamelCase_): for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCamelCase__ : int = xa elif isinstance(lowerCamelCase_ , torch.Tensor): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCamelCase__ : Dict = output_chunk else: raise ValueError('Not supported') i += chunk_size UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_) return out class __lowercase : def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ): UpperCamelCase__ : str = max_chunk_size UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[tuple] = None def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int): logging.info('Tuning chunk size...') if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)] UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size] UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(UpperCAmelCase_ : int) -> bool: try: with torch.no_grad(): fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_) return True except RuntimeError: return False UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1 while i > min_viable_chunk_size_index: UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i]) if not viable: UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2 else: UpperCamelCase__ : Optional[int] = i UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2 return candidates[min_viable_chunk_size_index] def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable): UpperCamelCase__ : List[str] = True for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_): assert type(UpperCAmelCase_) == type(UpperCAmelCase_) if isinstance(UpperCAmelCase_ , (list, tuple)): consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) else: consistent &= aa == aa return consistent def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ): UpperCamelCase__ : List[Any] = True UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data) == len(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_) else: # Otherwise, we can reuse the precomputed value UpperCamelCase__ : Optional[int] = False if not consistent: UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
6
0
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = '▁' lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', } lowerCAmelCase__ = { 'vocab_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json' ), }, 'spm_file': { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model' ) }, } lowerCAmelCase__ = { 'facebook/s2t-small-librispeech-asr': 1024, } lowerCAmelCase__ = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de'] lowerCAmelCase__ = {'mustc': MUSTC_LANGS} class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = MAX_MODEL_INPUT_SIZES _lowerCamelCase = ['''input_ids''', '''attention_mask'''] _lowerCamelCase = [] def __init__( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any="<s>" , UpperCAmelCase_ : str="</s>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ): UpperCamelCase__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , do_upper_case=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , lang_codes=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) UpperCamelCase__ : List[str] = do_upper_case UpperCamelCase__ : Any = do_lower_case UpperCamelCase__ : Dict = load_json(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()} UpperCamelCase__ : int = spm_file UpperCamelCase__ : List[str] = load_spm(UpperCAmelCase_ , self.sp_model_kwargs) if lang_codes is not None: UpperCamelCase__ : Union[str, Any] = lang_codes UpperCamelCase__ : List[Any] = LANGUAGES[lang_codes] UpperCamelCase__ : List[Any] = [F'<lang:{lang}>' for lang in self.langs] UpperCamelCase__ : Union[str, Any] = {lang: self.sp_model.PieceToId(F'<lang:{lang}>') for lang in self.langs} UpperCamelCase__ : int = self.lang_tokens UpperCamelCase__ : List[Any] = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang) else: UpperCamelCase__ : int = {} @property def __UpperCamelCase ( self : List[Any]): return len(self.encoder) @property def __UpperCamelCase ( self : Any): return self._tgt_lang @tgt_lang.setter def __UpperCamelCase ( self : str , UpperCAmelCase_ : Any): UpperCamelCase__ : Optional[Any] = new_tgt_lang self.set_tgt_lang_special_tokens(UpperCAmelCase_) def __UpperCamelCase ( self : str , UpperCAmelCase_ : str): UpperCamelCase__ : List[Any] = self.lang_code_to_id[tgt_lang] UpperCamelCase__ : Optional[int] = [lang_code_id] def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str): return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Optional[Any]): return self.encoder.get(UpperCAmelCase_ , self.encoder[self.unk_token]) def __UpperCamelCase ( self : str , UpperCAmelCase_ : int): return self.decoder.get(UpperCAmelCase_ , self.unk_token) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[str]): UpperCamelCase__ : str = [] UpperCamelCase__ : int = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: UpperCamelCase__ : int = self.sp_model.decode(UpperCAmelCase_) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " UpperCamelCase__ : Optional[Any] = [] else: current_sub_tokens.append(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.sp_model.decode(UpperCAmelCase_) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=None): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = [1] * len(self.prefix_tokens) UpperCamelCase__ : str = [1] if token_ids_a is None: return prefix_ones + ([0] * len(UpperCAmelCase_)) + suffix_ones return prefix_ones + ([0] * len(UpperCAmelCase_)) + ([0] * len(UpperCAmelCase_)) + suffix_ones def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : List[Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : str): UpperCamelCase__ : int = self.__dict__.copy() UpperCamelCase__ : Tuple = None return state def __setstate__( self : int , UpperCAmelCase_ : Dict): UpperCamelCase__ : Optional[int] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): UpperCamelCase__ : Union[str, Any] = {} UpperCamelCase__ : Any = load_spm(self.spm_file , self.sp_model_kwargs) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): UpperCamelCase__ : List[Any] = Path(UpperCAmelCase_) assert save_dir.is_dir(), F'{save_directory} should be a directory' UpperCamelCase__ : Dict = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) UpperCamelCase__ : str = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , UpperCAmelCase_) if os.path.abspath(self.spm_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.spm_file): copyfile(self.spm_file , UpperCAmelCase_) elif not os.path.isfile(self.spm_file): with open(UpperCAmelCase_ , 'wb') as fi: UpperCamelCase__ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_) return (str(UpperCAmelCase_), str(UpperCAmelCase_)) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> sentencepiece.SentencePieceProcessor: UpperCamelCase__ : Any = sentencepiece.SentencePieceProcessor(**lowerCamelCase_) spm.Load(str(lowerCamelCase_)) return spm def __UpperCAmelCase ( lowerCamelCase_) -> Union[Dict, List]: with open(lowerCamelCase_ , 'r') as f: return json.load(lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None: with open(lowerCamelCase_ , 'w') as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=2)
721
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : int = tempfile.mkdtemp() # fmt: off UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'} UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(UpperCAmelCase_) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(UpperCAmelCase_)) UpperCamelCase__ : Dict = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]): return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : str): shutil.rmtree(self.tmpdirname) def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Union[str, Any] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer() UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_slow.save_pretrained(self.tmpdirname) UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_) UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_fast.save_pretrained(self.tmpdirname) UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : List[str]): UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0) UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , UpperCAmelCase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : int = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : int = self.prepare_image_inputs() UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np') UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Any = 'lower newer' UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def __UpperCamelCase ( self : int): UpperCamelCase__ : Optional[int] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'lower newer' UpperCamelCase__ : List[Any] = self.prepare_image_inputs() UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_): processor() def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : str): UpperCamelCase__ : Union[str, Any] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'lower newer' UpperCamelCase__ : Optional[int] = self.prepare_image_inputs() UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
6
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'microsoft/unispeech-large-1500h-cv': ( 'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''unispeech''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Dict=3_072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=1e-5 , UpperCAmelCase_ : Any="group" , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : int=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[Any]=128 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Tuple=10 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Tuple=320 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Dict=256 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : int="mean" , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Tuple=256 , UpperCAmelCase_ : List[Any]=80 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : List[Any]=0.5 , **UpperCAmelCase_ : Dict , ): super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_) UpperCamelCase__ : str = hidden_size UpperCamelCase__ : Tuple = feat_extract_norm UpperCamelCase__ : Any = feat_extract_activation UpperCamelCase__ : Any = list(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = list(UpperCAmelCase_) UpperCamelCase__ : int = list(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = conv_bias UpperCamelCase__ : Union[str, Any] = num_conv_pos_embeddings UpperCamelCase__ : str = num_conv_pos_embedding_groups UpperCamelCase__ : Union[str, Any] = len(self.conv_dim) UpperCamelCase__ : Any = num_hidden_layers UpperCamelCase__ : Union[str, Any] = intermediate_size UpperCamelCase__ : int = hidden_act UpperCamelCase__ : List[str] = num_attention_heads UpperCamelCase__ : str = hidden_dropout UpperCamelCase__ : Optional[int] = attention_dropout UpperCamelCase__ : List[str] = activation_dropout UpperCamelCase__ : Any = feat_proj_dropout UpperCamelCase__ : Any = final_dropout UpperCamelCase__ : Union[str, Any] = layerdrop UpperCamelCase__ : Tuple = layer_norm_eps UpperCamelCase__ : Union[str, Any] = initializer_range UpperCamelCase__ : Tuple = num_ctc_classes UpperCamelCase__ : List[str] = vocab_size UpperCamelCase__ : Optional[int] = do_stable_layer_norm UpperCamelCase__ : List[Any] = use_weighted_layer_sum UpperCamelCase__ : int = classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase__ : int = apply_spec_augment UpperCamelCase__ : Tuple = mask_time_prob UpperCamelCase__ : List[Any] = mask_time_length UpperCamelCase__ : List[Any] = mask_time_min_masks UpperCamelCase__ : List[Any] = mask_feature_prob UpperCamelCase__ : Tuple = mask_feature_length UpperCamelCase__ : int = mask_feature_min_masks # parameters for pretraining with codevector quantized representations UpperCamelCase__ : Dict = num_codevectors_per_group UpperCamelCase__ : Tuple = num_codevector_groups UpperCamelCase__ : List[Any] = contrastive_logits_temperature UpperCamelCase__ : int = feat_quantizer_dropout UpperCamelCase__ : Union[str, Any] = num_negatives UpperCamelCase__ : List[str] = codevector_dim UpperCamelCase__ : List[Any] = proj_codevector_dim UpperCamelCase__ : Optional[Any] = diversity_loss_weight # ctc loss UpperCamelCase__ : Any = ctc_loss_reduction UpperCamelCase__ : Optional[int] = ctc_zero_infinity # pretraining loss UpperCamelCase__ : List[Any] = replace_prob @property def __UpperCamelCase ( self : Any): return functools.reduce(operator.mul , self.conv_stride , 1)
700
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None: UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_) for k, v in tqdm(state_dict.items()): if not isinstance(lowerCamelCase_ , torch.Tensor): raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin') UpperCamelCase__ : int = v.half() if save_path is None: # overwrite src_path UpperCamelCase__ : List[Any] = src_path torch.save(lowerCamelCase_ , lowerCamelCase_) if __name__ == "__main__": fire.Fire(convert)
6
0
'''simple docstring''' import math import sys def __UpperCAmelCase ( lowerCamelCase_) -> int: if number != int(lowerCamelCase_): raise ValueError('the value of input must be a natural number') if number < 0: raise ValueError('the value of input must not be a negative number') if number == 0: return 1 UpperCamelCase__ : Optional[int] = [-1] * (number + 1) UpperCamelCase__ : Any = 0 for i in range(1 , number + 1): UpperCamelCase__ : Union[str, Any] = sys.maxsize UpperCamelCase__ : List[str] = int(math.sqrt(lowerCamelCase_)) for j in range(1 , root + 1): UpperCamelCase__ : str = 1 + answers[i - (j**2)] UpperCamelCase__ : int = min(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : List[Any] = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
701
'''simple docstring''' import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'nvidia/segformer-b0-finetuned-ade-512-512': ( 'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''segformer''' def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ): super().__init__(**UpperCAmelCase_) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , ) UpperCamelCase__ : List[Any] = num_channels UpperCamelCase__ : Any = num_encoder_blocks UpperCamelCase__ : Dict = depths UpperCamelCase__ : int = sr_ratios UpperCamelCase__ : str = hidden_sizes UpperCamelCase__ : List[str] = patch_sizes UpperCamelCase__ : Optional[int] = strides UpperCamelCase__ : Dict = mlp_ratios UpperCamelCase__ : List[str] = num_attention_heads UpperCamelCase__ : int = hidden_act UpperCamelCase__ : Any = hidden_dropout_prob UpperCamelCase__ : str = attention_probs_dropout_prob UpperCamelCase__ : List[str] = classifier_dropout_prob UpperCamelCase__ : List[Any] = initializer_range UpperCamelCase__ : Union[str, Any] = drop_path_rate UpperCamelCase__ : int = layer_norm_eps UpperCamelCase__ : Dict = decoder_hidden_size UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_) UpperCamelCase__ : List[str] = semantic_loss_ignore_index class __lowercase (__lowerCamelCase ): _lowerCamelCase = version.parse('''1.11''' ) @property def __UpperCamelCase ( self : Optional[Any]): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def __UpperCamelCase ( self : Optional[Any]): return 1e-4 @property def __UpperCamelCase ( self : Any): return 12
6
0
'''simple docstring''' from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json', } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''efficientnet''' def __init__( self : Any , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 600 , UpperCAmelCase_ : float = 2.0 , UpperCAmelCase_ : float = 3.1 , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase_ : List[int] = [] , UpperCAmelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase_ : float = 0.25 , UpperCAmelCase_ : str = "swish" , UpperCAmelCase_ : int = 2_560 , UpperCAmelCase_ : str = "mean" , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 0.0_01 , UpperCAmelCase_ : float = 0.99 , UpperCAmelCase_ : float = 0.5 , UpperCAmelCase_ : float = 0.2 , **UpperCAmelCase_ : int , ): super().__init__(**UpperCAmelCase_) UpperCamelCase__ : Dict = num_channels UpperCamelCase__ : List[str] = image_size UpperCamelCase__ : List[str] = width_coefficient UpperCamelCase__ : Union[str, Any] = depth_coefficient UpperCamelCase__ : Tuple = depth_divisor UpperCamelCase__ : Optional[Any] = kernel_sizes UpperCamelCase__ : Optional[int] = in_channels UpperCamelCase__ : Any = out_channels UpperCamelCase__ : Union[str, Any] = depthwise_padding UpperCamelCase__ : str = strides UpperCamelCase__ : Any = num_block_repeats UpperCamelCase__ : Any = expand_ratios UpperCamelCase__ : Optional[Any] = squeeze_expansion_ratio UpperCamelCase__ : Union[str, Any] = hidden_act UpperCamelCase__ : Union[str, Any] = hidden_dim UpperCamelCase__ : List[str] = pooling_type UpperCamelCase__ : List[Any] = initializer_range UpperCamelCase__ : int = batch_norm_eps UpperCamelCase__ : List[Any] = batch_norm_momentum UpperCamelCase__ : Optional[int] = dropout_rate UpperCamelCase__ : Optional[Any] = drop_connect_rate UpperCamelCase__ : Union[str, Any] = sum(UpperCAmelCase_) * 4 class __lowercase (__lowerCamelCase ): _lowerCamelCase = version.parse('''1.11''' ) @property def __UpperCamelCase ( self : Tuple): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def __UpperCamelCase ( self : List[Any]): return 1e-5
702
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]: return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)] if __name__ == "__main__": from doctest import testmod testmod()
6
0
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType lowerCAmelCase__ = None lowerCAmelCase__ = '<' if sys.byteorder == 'little' else '>' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image lowerCAmelCase__ = [ np.dtype('|b1'), np.dtype('|u1'), np.dtype('<u2'), np.dtype('>u2'), np.dtype('<i2'), np.dtype('>i2'), np.dtype('<u4'), np.dtype('>u4'), np.dtype('<i4'), np.dtype('>i4'), np.dtype('<f4'), np.dtype('>f4'), np.dtype('<f8'), np.dtype('>f8'), ] @dataclass class __lowercase : _lowerCamelCase = True _lowerCamelCase = None # Automatically constructed _lowerCamelCase = '''PIL.Image.Image''' _lowerCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _lowerCamelCase = field(default='''Image''' , init=__lowerCamelCase , repr=__lowerCamelCase ) def __call__( self : Union[str, Any]): return self.pa_type def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.') if isinstance(UpperCAmelCase_ , UpperCAmelCase_): UpperCamelCase__ : int = np.array(UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_): return {"path": value, "bytes": None} elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): return {"path": None, "bytes": value} elif isinstance(UpperCAmelCase_ , np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(UpperCAmelCase_) elif isinstance(UpperCAmelCase_ , PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(UpperCAmelCase_) elif value.get('path') is not None and os.path.isfile(value['path']): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('path')} elif value.get('bytes') is not None or value.get('path') is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('bytes'), "path": value.get('path')} else: raise ValueError( F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.') def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : dict , UpperCAmelCase_ : int=None): if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.') if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support decoding images, please install \'Pillow\'.') if token_per_repo_id is None: UpperCamelCase__ : int = {} UpperCamelCase__ : int = value['path'], value['bytes'] if bytes_ is None: if path is None: raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.') else: if is_local_path(UpperCAmelCase_): UpperCamelCase__ : int = PIL.Image.open(UpperCAmelCase_) else: UpperCamelCase__ : str = path.split('::')[-1] try: UpperCamelCase__ : Union[str, Any] = string_to_dict(UpperCAmelCase_ , config.HUB_DATASETS_URL)['repo_id'] UpperCamelCase__ : Optional[Any] = token_per_repo_id.get(UpperCAmelCase_) except ValueError: UpperCamelCase__ : Any = None with xopen(UpperCAmelCase_ , 'rb' , use_auth_token=UpperCAmelCase_) as f: UpperCamelCase__ : Optional[int] = BytesIO(f.read()) UpperCamelCase__ : Tuple = PIL.Image.open(bytes_) else: UpperCamelCase__ : List[Any] = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors return image def __UpperCamelCase ( self : List[str]): from .features import Value return ( self if self.decode else { "bytes": Value('binary'), "path": Value('string'), } ) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray]): if pa.types.is_string(storage.type): UpperCamelCase__ : Dict = pa.array([None] * len(UpperCAmelCase_) , type=pa.binary()) UpperCamelCase__ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null()) elif pa.types.is_binary(storage.type): UpperCamelCase__ : Tuple = pa.array([None] * len(UpperCAmelCase_) , type=pa.string()) UpperCamelCase__ : Optional[int] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index('bytes') >= 0: UpperCamelCase__ : Tuple = storage.field('bytes') else: UpperCamelCase__ : Tuple = pa.array([None] * len(UpperCAmelCase_) , type=pa.binary()) if storage.type.get_field_index('path') >= 0: UpperCamelCase__ : int = storage.field('path') else: UpperCamelCase__ : List[str] = pa.array([None] * len(UpperCAmelCase_) , type=pa.string()) UpperCamelCase__ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null()) elif pa.types.is_list(storage.type): UpperCamelCase__ : Optional[Any] = pa.array( [encode_np_array(np.array(UpperCAmelCase_))['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) UpperCamelCase__ : Optional[Any] = pa.array([None] * len(UpperCAmelCase_) , type=pa.string()) UpperCamelCase__ : Optional[int] = pa.StructArray.from_arrays( [bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase_ , self.pa_type) def __UpperCamelCase ( self : str , UpperCAmelCase_ : pa.StructArray): @no_op_if_value_is_null def path_to_bytes(UpperCAmelCase_ : int): with xopen(UpperCAmelCase_ , 'rb') as f: UpperCamelCase__ : List[str] = f.read() return bytes_ UpperCamelCase__ : Optional[Any] = pa.array( [ (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) UpperCamelCase__ : List[Any] = pa.array( [os.path.basename(UpperCAmelCase_) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , ) UpperCamelCase__ : str = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase_ , self.pa_type) def __UpperCAmelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.') global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() UpperCamelCase__ : Tuple = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys())) return _IMAGE_COMPRESSION_FORMATS def __UpperCAmelCase ( lowerCamelCase_) -> bytes: UpperCamelCase__ : List[Any] = BytesIO() if image.format in list_image_compression_formats(): UpperCamelCase__ : Dict = image.format else: UpperCamelCase__ : List[str] = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF' image.save(lowerCamelCase_ , format=lowerCamelCase_) return buffer.getvalue() def __UpperCAmelCase ( lowerCamelCase_) -> dict: if hasattr(lowerCamelCase_ , 'filename') and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowerCamelCase_)} def __UpperCAmelCase ( lowerCamelCase_) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.') UpperCamelCase__ : List[Any] = array.dtype UpperCamelCase__ : Union[str, Any] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER UpperCamelCase__ : Optional[int] = dtype.kind UpperCamelCase__ : int = dtype.itemsize UpperCamelCase__ : List[str] = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: UpperCamelCase__ : Union[str, Any] = np.dtype('|u1') if dtype_kind not in ["u", "i"]: raise TypeError( f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.') if dtype is not dest_dtype: warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'') # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: UpperCamelCase__ : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: UpperCamelCase__ : List[str] = dtype_byteorder + dtype_kind + str(lowerCamelCase_) UpperCamelCase__ : Any = np.dtype(lowerCamelCase_) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'') break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}') UpperCamelCase__ : Optional[int] = PIL.Image.fromarray(array.astype(lowerCamelCase_)) return {"path": None, "bytes": image_to_bytes(lowerCamelCase_)} def __UpperCAmelCase ( lowerCamelCase_) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.') if objs: UpperCamelCase__ : str = first_non_null_value(lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowerCamelCase_ , np.ndarray): UpperCamelCase__ : List[str] = no_op_if_value_is_null(lowerCamelCase_) return [obj_to_image_dict_func(lowerCamelCase_) for obj in objs] elif isinstance(lowerCamelCase_ , PIL.Image.Image): UpperCamelCase__ : Union[str, Any] = no_op_if_value_is_null(lowerCamelCase_) return [obj_to_image_dict_func(lowerCamelCase_) for obj in objs] else: return objs else: return objs
703
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def __UpperCAmelCase ( lowerCamelCase_) -> float: return np.dot(lowerCamelCase_ , lowerCamelCase_) class __lowercase : def __init__( self : Tuple , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ): UpperCamelCase__ : Union[str, Any] = regularization UpperCamelCase__ : Optional[int] = gamma if kernel == "linear": UpperCamelCase__ : List[str] = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma') if not isinstance(self.gamma , (float, int)): raise ValueError('gamma must be float or int') if not self.gamma > 0: raise ValueError('gamma must be > 0') UpperCamelCase__ : Union[str, Any] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}' raise ValueError(UpperCAmelCase_) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Any = observations UpperCamelCase__ : Tuple = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((UpperCamelCase__), ) : Optional[Any] = np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: UpperCamelCase__ : Union[str, Any] = 0 ((UpperCamelCase__), ) : int = np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0) UpperCamelCase__ : Dict = Bounds(0 , self.regularization) UpperCamelCase__ : Any = minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x UpperCamelCase__ : str = l_star # calculating mean offset of separation plane to points UpperCamelCase__ : Any = 0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) UpperCamelCase__ : List[str] = s / n def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Optional[int] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
6
0
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( 'The `image_to_image.py` script is outdated. Please use directly `from diffusers import' ' StableDiffusionImg2ImgPipeline` instead.' )
704
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_) -> Any: UpperCamelCase__ : Dict = DPTConfig() if "large" in checkpoint_url: UpperCamelCase__ : List[str] = 1_024 UpperCamelCase__ : List[str] = 4_096 UpperCamelCase__ : Optional[int] = 24 UpperCamelCase__ : List[str] = 16 UpperCamelCase__ : List[str] = [5, 11, 17, 23] UpperCamelCase__ : str = [256, 512, 1_024, 1_024] UpperCamelCase__ : Union[str, Any] = (1, 384, 384) if "ade" in checkpoint_url: UpperCamelCase__ : int = True UpperCamelCase__ : Optional[Any] = 150 UpperCamelCase__ : int = 'huggingface/label-files' UpperCamelCase__ : List[Any] = 'ade20k-id2label.json' UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r')) UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()} UpperCamelCase__ : Union[str, Any] = idalabel UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()} UpperCamelCase__ : Any = [1, 150, 480, 480] return config, expected_shape def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder') if "pretrained.model" in name: UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings') if "patch_embed" in name: UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings') if "pos_embed" in name: UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings') if "attn.proj" in name: UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense') if "proj" in name and "project" not in name: UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection') if "blocks" in name: UpperCamelCase__ : int = name.replace('blocks' , 'layer') if "mlp.fc1" in name: UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense') if "mlp.fc2" in name: UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense') if "norm1" in name: UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before') if "norm2" in name: UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after') if "scratch.output_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head') if "scratch" in name: UpperCamelCase__ : int = name.replace('scratch' , 'neck') if "layer1_rn" in name: UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0') if "layer2_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1') if "layer3_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2') if "layer4_rn" in name: UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3') if "refinenet" in name: UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}') if "out_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection') if "resConfUnit1" in name: UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1') if "resConfUnit2" in name: UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2') if "conv1" in name: UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1') if "conv2" in name: UpperCamelCase__ : int = name.replace('conv2' , 'convolution2') # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0') if "pretrained.act_postprocess2.0.project.0" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0') if "pretrained.act_postprocess3.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0') if "pretrained.act_postprocess4.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0') # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection') if "pretrained.act_postprocess1.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize') if "pretrained.act_postprocess2.3" in name: UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection') if "pretrained.act_postprocess2.4" in name: UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize') if "pretrained.act_postprocess3.3" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection') if "pretrained.act_postprocess4.3" in name: UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection') if "pretrained.act_postprocess4.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize') if "pretrained" in name: UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt') if "bn" in name: UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm') if "head" in name: UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head') if "encoder.norm" in name: UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm') if "auxlayer" in name: UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head') return name def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any: for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight') UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias') # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :] UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size] UpperCamelCase__ : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw) return im @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_) # load original state_dict from URL UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu') # remove certain keys remove_ignore_keys_(lowerCamelCase_) # rename keys for key in state_dict.copy().keys(): UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_) UpperCamelCase__ : List[str] = val # read in qkv matrices read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_) # load HuggingFace model UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_) model.load_state_dict(lowerCamelCase_) model.eval() # Check outputs on an image UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384 UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_) UpperCamelCase__ : int = prepare_img() UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt') # forward pass UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth # Assert logits UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]]) if "ade" in checkpoint_url: UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]]) assert outputs.shape == torch.Size(lowerCamelCase_) assert ( torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_) ) Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase_) if push_to_hub: print('Pushing model to hub...') model.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) lowerCAmelCase__ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
6
0
'''simple docstring''' import argparse import datetime def __UpperCAmelCase ( lowerCamelCase_) -> str: UpperCamelCase__ : int = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } UpperCamelCase__ : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowerCamelCase_) < 11: raise ValueError('Must be 10 characters long') # Get month UpperCamelCase__ : int = int(date_input[0] + date_input[1]) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12') UpperCamelCase__ : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'') # Get day UpperCamelCase__ : int = int(date_input[3] + date_input[4]) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31') # Get second separator UpperCamelCase__ : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'') # Get year UpperCamelCase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9]) # Arbitrary year range if not 45 < y < 8_500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?') # Get datetime obj for validation UpperCamelCase__ : int = datetime.date(int(lowerCamelCase_) , int(lowerCamelCase_) , int(lowerCamelCase_)) # Start math if m <= 2: UpperCamelCase__ : int = y - 1 UpperCamelCase__ : List[Any] = m + 12 # maths var UpperCamelCase__ : int = int(str(lowerCamelCase_)[:2]) UpperCamelCase__ : int = int(str(lowerCamelCase_)[2:]) UpperCamelCase__ : int = int(2.6 * m - 5.39) UpperCamelCase__ : int = int(c / 4) UpperCamelCase__ : int = int(k / 4) UpperCamelCase__ : int = int(d + k) UpperCamelCase__ : int = int(t + u + v + x) UpperCamelCase__ : int = int(z - (2 * c)) UpperCamelCase__ : int = round(w % 7) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.') # Response UpperCamelCase__ : str = f'Your date {date_input}, is a {days[str(lowerCamelCase_)]}!' return response if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = argparse.ArgumentParser( description=( 'Find out what day of the week nearly any date is or was. Enter ' 'date as a string in the mm-dd-yyyy or mm/dd/yyyy format' ) ) parser.add_argument( 'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)' ) lowerCAmelCase__ = parser.parse_args() zeller(args.date_input)
705
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ): UpperCamelCase__ : Tuple = parent UpperCamelCase__ : List[str] = batch_size UpperCamelCase__ : Optional[Any] = image_size UpperCamelCase__ : Optional[Any] = patch_size UpperCamelCase__ : List[str] = num_channels UpperCamelCase__ : Union[str, Any] = is_training UpperCamelCase__ : int = use_labels UpperCamelCase__ : Optional[int] = hidden_size UpperCamelCase__ : Any = num_hidden_layers UpperCamelCase__ : str = num_attention_heads UpperCamelCase__ : str = intermediate_size UpperCamelCase__ : Union[str, Any] = hidden_act UpperCamelCase__ : Optional[int] = hidden_dropout_prob UpperCamelCase__ : Tuple = attention_probs_dropout_prob UpperCamelCase__ : Any = type_sequence_label_size UpperCamelCase__ : int = initializer_range UpperCamelCase__ : Optional[int] = mask_ratio UpperCamelCase__ : int = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase__ : str = (image_size // patch_size) ** 2 UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCamelCase__ : List[str] = None if self.use_labels: UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCamelCase__ : Any = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : List[Any]): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]): UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple): UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Dict = model(UpperCAmelCase_) UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2 UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) # test greyscale images UpperCamelCase__ : List[Any] = 1 UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[str] = self.prepare_config_and_inputs() UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs UpperCamelCase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): _lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () _lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : List[str] = ViTMAEModelTester(self) UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37) def __UpperCamelCase ( self : Any): self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds') def __UpperCamelCase ( self : Tuple): pass def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) UpperCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear)) def __UpperCamelCase ( self : List[str]): UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_) UpperCamelCase__ : int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase__ : Any = [*signature.parameters.keys()] UpperCamelCase__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_) def __UpperCamelCase ( self : str): UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]): # make masks reproducible np.random.seed(2) UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2) UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase__ : List[str] = pt_noise super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) UpperCamelCase__ : Dict = outputs[0].cpu().numpy() UpperCamelCase__ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_) model.to(UpperCAmelCase_) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) # Make sure we don't have nans UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy() UpperCamelCase__ : Any = 0 UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(UpperCAmelCase_ , 1e-5) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Tuple): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Optional[int]): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Tuple): pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load') def __UpperCamelCase ( self : Tuple): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def __UpperCamelCase ( self : Optional[int]): pass @slow def __UpperCamelCase ( self : Optional[Any]): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_torch @require_vision class __lowercase (unittest.TestCase ): @cached_property def __UpperCamelCase ( self : int): return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None @slow def __UpperCamelCase ( self : str): # make random mask reproducible across the PT and TF model np.random.seed(2) UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.default_image_processor UpperCamelCase__ : Dict = prepare_img() UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase__ : Union[str, Any] = ViTMAEConfig() UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches)) # forward pass with torch.no_grad(): UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_)) # verify the logits UpperCamelCase__ : Tuple = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape , UpperCAmelCase_) UpperCamelCase__ : Any = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]]) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
6
0
'''simple docstring''' from pathlib import Path import fire def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : List[str] = Path(lowerCamelCase_) UpperCamelCase__ : Any = Path(lowerCamelCase_) dest_dir.mkdir(exist_ok=lowerCamelCase_) for path in src_dir.iterdir(): UpperCamelCase__ : List[str] = [x.rstrip() for x in list(path.open().readlines())][:n] UpperCamelCase__ : Optional[Any] = dest_dir.joinpath(path.name) print(lowerCamelCase_) dest_path.open('w').write('\n'.join(lowerCamelCase_)) if __name__ == "__main__": fire.Fire(minify)
706
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowercase (metaclass=__lowerCamelCase ): _lowerCamelCase = ['''torch''', '''scipy'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int): requires_backends(self , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]): requires_backends(cls , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any): requires_backends(cls , ['torch', 'scipy'])
6
0
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'tokenizer_file': { 'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json', 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json', }, } class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = ['''input_ids''', '''attention_mask'''] _lowerCamelCase = None def __init__( self : str , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]="<unk>" , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Tuple="<pad>" , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[int]=False , **UpperCAmelCase_ : int , ): super().__init__( UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ , **UpperCAmelCase_ , ) UpperCamelCase__ : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_) != add_prefix_space: UpperCamelCase__ : List[str] = getattr(UpperCAmelCase_ , pre_tok_state.pop('type')) UpperCamelCase__ : int = add_prefix_space UpperCamelCase__ : List[str] = pre_tok_class(**UpperCAmelCase_) UpperCamelCase__ : List[str] = add_prefix_space def __UpperCamelCase ( self : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple): UpperCamelCase__ : int = kwargs.get('is_split_into_words' , UpperCAmelCase_) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' ' pretokenized inputs.') return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict): UpperCamelCase__ : Any = kwargs.get('is_split_into_words' , UpperCAmelCase_) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' ' pretokenized inputs.') return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): UpperCamelCase__ : Any = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_) return tuple(UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : "Conversation"): UpperCamelCase__ : str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) + [self.eos_token_id]) if len(UpperCAmelCase_) > self.model_max_length: UpperCamelCase__ : Tuple = input_ids[-self.model_max_length :] return input_ids
707
'''simple docstring''' class __lowercase : def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False): # Mapping from the first character of the prefix of the node UpperCamelCase__ : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word UpperCamelCase__ : List[Any] = is_leaf UpperCamelCase__ : Optional[Any] = prefix def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[int] = 0 for q, w in zip(self.prefix , UpperCAmelCase_): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]): for word in words: self.insert(UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str): # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: UpperCamelCase__ : Optional[Any] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_) else: UpperCamelCase__ : int = self.nodes[word[0]] UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match( UpperCAmelCase_) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(UpperCAmelCase_) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: UpperCamelCase__ : Tuple = remaining_prefix UpperCamelCase__ : str = self.nodes[matching_string[0]] UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : str = aux_node if remaining_word == "": UpperCamelCase__ : int = True else: self.nodes[matching_string[0]].insert(UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_) if not incoming_node: return False else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match( UpperCAmelCase_) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(UpperCAmelCase_) def __UpperCamelCase ( self : str , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_) if not incoming_node: return False else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match( UpperCAmelCase_) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(UpperCAmelCase_) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes) == 1 and not self.is_leaf: UpperCamelCase__ : List[str] = list(self.nodes.values())[0] UpperCamelCase__ : Tuple = merging_node.is_leaf self.prefix += merging_node.prefix UpperCamelCase__ : Tuple = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes) > 1: UpperCamelCase__ : str = False # If there is 1 edge, we merge it with its child else: UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0] UpperCamelCase__ : Optional[Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix UpperCamelCase__ : Union[str, Any] = merging_node.nodes return True def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0): if self.prefix != "": print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '') for value in self.nodes.values(): value.print_tree(height + 1) def __UpperCAmelCase ( ) -> bool: UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split() UpperCamelCase__ : List[Any] = RadixNode() root.insert_many(lowerCamelCase_) assert all(root.find(lowerCamelCase_) for word in words) assert not root.find('bandanas') assert not root.find('apps') root.delete('all') assert not root.find('all') root.delete('banana') assert not root.find('banana') assert root.find('bananas') return True def __UpperCAmelCase ( ) -> None: assert test_trie() def __UpperCAmelCase ( ) -> None: UpperCamelCase__ : List[Any] = RadixNode() UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split() root.insert_many(lowerCamelCase_) print('Words:' , lowerCamelCase_) print('Tree:') root.print_tree() if __name__ == "__main__": main()
6
0
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1_337 , num_examples=42 , dataset_name='my_dataset')}), SplitDict({'train': SplitInfo(name='train' , num_bytes=1_337 , num_examples=42)}), SplitDict({'train': SplitInfo()}), ] , ) def __UpperCAmelCase ( lowerCamelCase_) -> List[str]: UpperCamelCase__ : Any = split_dict._to_yaml_list() assert len(lowerCamelCase_) == len(lowerCamelCase_) UpperCamelCase__ : Union[str, Any] = SplitDict._from_yaml_list(lowerCamelCase_) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump UpperCamelCase__ : Dict = None # the split name of split_dict takes over the name of the split info object UpperCamelCase__ : List[str] = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=lowerCamelCase_), SplitInfo(dataset_name='my_dataset')]) def __UpperCAmelCase ( lowerCamelCase_) -> Any: # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files UpperCamelCase__ : List[str] = asdict(SplitDict({'train': split_info})) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
708
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase__ = False class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : int): return 12 @property def __UpperCamelCase ( self : Tuple): return 12 @property def __UpperCamelCase ( self : Dict): return 32 @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def __UpperCamelCase ( self : List[str]): torch.manual_seed(0) UpperCamelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = 12 UpperCamelCase__ : Dict = 12 UpperCamelCase__ : Union[str, Any] = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_) return model def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = 'cpu' UpperCamelCase__ : List[str] = self.dummy_vqvae UpperCamelCase__ : List[str] = self.dummy_text_encoder UpperCamelCase__ : Optional[int] = self.dummy_tokenizer UpperCamelCase__ : List[str] = self.dummy_transformer UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_) UpperCamelCase__ : int = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Optional[int] = 'cpu' UpperCamelCase__ : str = self.dummy_vqvae UpperCamelCase__ : Any = self.dummy_text_encoder UpperCamelCase__ : List[Any] = self.dummy_tokenizer UpperCamelCase__ : Dict = self.dummy_transformer UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length) UpperCamelCase__ : str = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : str = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : int = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Any): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy') UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq') UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_) pipeline.set_progress_bar_config(disable=UpperCAmelCase_) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : int = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , ) UpperCamelCase__ : int = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 2.0
6
0
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = [ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def __UpperCAmelCase ( lowerCamelCase_) -> str: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: UpperCamelCase__ : Union[str, Any] = k.replace(lowerCamelCase_ , lowerCamelCase_) if k.startswith('encoder'): UpperCamelCase__ : Any = k.replace('.attn' , '.self_attn') UpperCamelCase__ : Optional[Any] = k.replace('norm1' , 'self_attn_layer_norm') UpperCamelCase__ : List[Any] = k.replace('norm2' , 'final_layer_norm') elif k.startswith('decoder'): UpperCamelCase__ : int = k.replace('norm1' , 'self_attn_layer_norm') UpperCamelCase__ : List[Any] = k.replace('norm2' , 'encoder_attn_layer_norm') UpperCamelCase__ : List[Any] = k.replace('norm3' , 'final_layer_norm') return k def __UpperCAmelCase ( lowerCamelCase_) -> int: UpperCamelCase__ : Tuple = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: UpperCamelCase__ : Optional[int] = sd.pop(lowerCamelCase_) UpperCamelCase__ : List[str] = k.replace('layernorm_embedding' , 'layer_norm') assert new_k not in sd UpperCamelCase__ : Union[str, Any] = v lowerCAmelCase__ = ['START'] @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]: UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location='cpu') UpperCamelCase__ : int = model['model'] UpperCamelCase__ : Optional[int] = BlenderbotConfig.from_json_file(lowerCamelCase_) UpperCamelCase__ : Dict = BlenderbotForConditionalGeneration(lowerCamelCase_) UpperCamelCase__ : int = m.model.state_dict().keys() UpperCamelCase__ : Tuple = [] UpperCamelCase__ : Tuple = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue UpperCamelCase__ : List[Any] = rename_state_dict_key(lowerCamelCase_) if new_k not in valid_keys: failures.append([k, new_k]) else: UpperCamelCase__ : Dict = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowerCamelCase_) m.model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_) m.half() m.save_pretrained(lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument( '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use' ) lowerCAmelCase__ = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
709
'''simple docstring''' import numpy as np from PIL import Image def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : str = 0 # compute the shape of the output matrix UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape)) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size]) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[int] = 0 return updated_arr def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : Tuple = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : List[str] = 0 UpperCamelCase__ : List[Any] = 0 # compute the shape of the output matrix UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape)) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size])) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : Union[str, Any] = 0 UpperCamelCase__ : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='avgpooling', verbose=True) # Loading the image lowerCAmelCase__ = Image.open('path_to_image') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
6
0
from __future__ import annotations def __UpperCAmelCase ( lowerCamelCase_) -> None: create_state_space_tree(lowerCamelCase_ , [] , 0 , [0 for i in range(len(lowerCamelCase_))]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> None: if index == len(lowerCamelCase_): print(lowerCamelCase_) return for i in range(len(lowerCamelCase_)): if not index_used[i]: current_sequence.append(sequence[i]) UpperCamelCase__ : List[str] = True create_state_space_tree(lowerCamelCase_ , lowerCamelCase_ , index + 1 , lowerCamelCase_) current_sequence.pop() UpperCamelCase__ : Dict = False lowerCAmelCase__ = [3, 1, 2, 4] generate_all_permutations(sequence) lowerCAmelCase__ = ['A', 'B', 'C'] generate_all_permutations(sequence_a)
710
'''simple docstring''' from __future__ import annotations class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]): UpperCamelCase__ : int = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.') if len(UpperCAmelCase_) != 0: UpperCamelCase__ : str = len(rows[0]) if cols == 0: raise error for row in rows: if len(UpperCAmelCase_) != cols: raise error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise error UpperCamelCase__ : Optional[int] = rows else: UpperCamelCase__ : Optional[Any] = [] def __UpperCamelCase ( self : Union[str, Any]): return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] @property def __UpperCamelCase ( self : Dict): return len(self.rows) @property def __UpperCamelCase ( self : Tuple): return len(self.rows[0]) @property def __UpperCamelCase ( self : List[Any]): return (self.num_rows, self.num_columns) @property def __UpperCamelCase ( self : Any): return self.order[0] == self.order[1] def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[int] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] for row_num in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : Dict): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0]) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns)) def __UpperCamelCase ( self : str): return bool(self.determinant()) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int): UpperCamelCase__ : Optional[Any] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns) if other_column != column ] for other_row in range(self.num_rows) if other_row != row ] return Matrix(UpperCAmelCase_).determinant() def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int): if (row + column) % 2 == 0: return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : List[Any]): return Matrix( [ [self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)] for row in range(self.num_rows) ]) def __UpperCamelCase ( self : Optional[int]): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns) ] for row in range(self.minors().num_rows) ]) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = [ [self.cofactors().rows[column][row] for column in range(self.num_columns)] for row in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse') return self.adjugate() * (1 / determinant) def __repr__( self : Any): return str(self.rows) def __str__( self : List[Any]): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0])) + "]]" return ( "[" + "\n ".join( [ '[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]' for row in self.rows ]) + "]" ) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix') if position is None: self.rows.append(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:] def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : int = TypeError( 'Column must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in column: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix') if position is None: UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)] else: UpperCamelCase__ : str = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows) ] def __eq__( self : List[Any] , UpperCAmelCase_ : object): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): return NotImplemented return self.rows == other.rows def __ne__( self : Any , UpperCAmelCase_ : object): return not self == other def __neg__( self : Union[str, Any]): return self * -1 def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Addition requires matrices of the same order') return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __sub__( self : Tuple , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order') return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float): if isinstance(UpperCAmelCase_ , (int, float)): return Matrix( [[int(element * other) for element in row] for row in self.rows]) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second') return Matrix( [ [Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()] for row in self.rows ]) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix') def __pow__( self : Dict , UpperCAmelCase_ : int): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise TypeError('A Matrix can only be raised to the power of an int') if not self.is_square: raise ValueError('Only square matrices can be raised to a power') if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power') UpperCamelCase__ : str = self for _ in range(other - 1): result *= self return result @classmethod def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]): return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_))) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' class __lowercase : def __init__( self : Dict , UpperCAmelCase_ : int): UpperCamelCase__ : str = n UpperCamelCase__ : List[str] = [None] * self.n UpperCamelCase__ : Optional[int] = 0 # index of the first element UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : Union[str, Any] = 0 def __len__( self : List[Any]): return self.size def __UpperCamelCase ( self : int): return self.size == 0 def __UpperCamelCase ( self : Tuple): return False if self.is_empty() else self.array[self.front] def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple): if self.size >= self.n: raise Exception('QUEUE IS FULL') UpperCamelCase__ : int = data UpperCamelCase__ : Union[str, Any] = (self.rear + 1) % self.n self.size += 1 return self def __UpperCamelCase ( self : List[str]): if self.size == 0: raise Exception('UNDERFLOW') UpperCamelCase__ : str = self.array[self.front] UpperCamelCase__ : Optional[Any] = None UpperCamelCase__ : str = (self.front + 1) % self.n self.size -= 1 return temp
711
'''simple docstring''' import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowercase : def __UpperCamelCase ( self : Union[str, Any]): torch.manual_seed(0) UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : List[str] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Dict): torch.manual_seed(0) UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : str = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[str] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Any): UpperCamelCase__ : Dict = self.get_dummy_components() UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = inputs['prompt'] UpperCamelCase__ : List[Any] = inputs['generator'] UpperCamelCase__ : Tuple = inputs['num_inference_steps'] UpperCamelCase__ : List[Any] = inputs['output_type'] if "image" in inputs: UpperCamelCase__ : Tuple = inputs['image'] else: UpperCamelCase__ : Union[str, Any] = None if "mask_image" in inputs: UpperCamelCase__ : Optional[int] = inputs['mask_image'] else: UpperCamelCase__ : int = None if "original_image" in inputs: UpperCamelCase__ : List[Any] = inputs['original_image'] else: UpperCamelCase__ : Optional[Any] = None UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_) # inputs with prompt converted to embeddings UpperCamelCase__ : List[Any] = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Dict = image if mask_image is not None: UpperCamelCase__ : Optional[int] = mask_image if original_image is not None: UpperCamelCase__ : Union[str, Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , ) UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = inputs['generator'] UpperCamelCase__ : List[Any] = inputs['num_inference_steps'] UpperCamelCase__ : Optional[int] = inputs['output_type'] # inputs with prompt converted to embeddings UpperCamelCase__ : Any = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Tuple = image if mask_image is not None: UpperCamelCase__ : Union[str, Any] = mask_image if original_image is not None: UpperCamelCase__ : str = original_image UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4) def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Any = self.get_dummy_components() UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4)
6
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''vit_mae''' def __init__( self : List[Any] , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Dict=3_072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : str=1e-12 , UpperCAmelCase_ : List[Any]=224 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : int=8 , UpperCAmelCase_ : Any=2_048 , UpperCAmelCase_ : List[Any]=0.75 , UpperCAmelCase_ : Tuple=False , **UpperCAmelCase_ : str , ): super().__init__(**UpperCAmelCase_) UpperCamelCase__ : Tuple = hidden_size UpperCamelCase__ : Optional[int] = num_hidden_layers UpperCamelCase__ : Optional[Any] = num_attention_heads UpperCamelCase__ : List[str] = intermediate_size UpperCamelCase__ : List[str] = hidden_act UpperCamelCase__ : int = hidden_dropout_prob UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob UpperCamelCase__ : int = initializer_range UpperCamelCase__ : Optional[Any] = layer_norm_eps UpperCamelCase__ : Optional[int] = image_size UpperCamelCase__ : List[Any] = patch_size UpperCamelCase__ : str = num_channels UpperCamelCase__ : Dict = qkv_bias UpperCamelCase__ : Tuple = decoder_num_attention_heads UpperCamelCase__ : int = decoder_hidden_size UpperCamelCase__ : Any = decoder_num_hidden_layers UpperCamelCase__ : Optional[int] = decoder_intermediate_size UpperCamelCase__ : List[str] = mask_ratio UpperCamelCase__ : Dict = norm_pix_loss
712
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCAmelCase__ = 3 def __UpperCAmelCase ( lowerCamelCase_) -> int: print('Generating primitive root of p') while True: UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_) if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1: continue if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1: continue return g def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print('Generating prime p...') UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number. UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p. UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety. UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_) UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p) UpperCamelCase__ : Optional[Any] = (key_size, d) return public_key, private_key def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None: if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'): print('\nWARNING:') print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' 'Use a different name or delete these files and re-run this program.') sys.exit() UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_) print(f'\nWriting public key to file {name}_pubkey.txt...') with open(f'{name}_pubkey.txt' , 'w') as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}') print(f'Writing private key to file {name}_privkey.txt...') with open(f'{name}_privkey.txt' , 'w') as fo: fo.write(f'{private_key[0]},{private_key[1]}') def __UpperCAmelCase ( ) -> None: print('Making key files...') make_key_files('elgamal' , 2_048) print('Key files generation successful') if __name__ == "__main__": main()
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> tuple[float, float]: # Check if the input is valid if not len(lowerCamelCase_) == len(lowerCamelCase_) == 3: raise ValueError('Please enter a valid equation.') if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('Both a & b of two equations can\'t be zero.') # Extract the coefficients UpperCamelCase__ : str = equationa UpperCamelCase__ : List[Any] = equationa # Calculate the determinants of the matrices UpperCamelCase__ : Optional[int] = aa * ba - aa * ba UpperCamelCase__ : Union[str, Any] = ca * ba - ca * ba UpperCamelCase__ : Union[str, Any] = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('Infinite solutions. (Consistent system)') else: raise ValueError('No solution. (Inconsistent system)') else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: UpperCamelCase__ : Tuple = determinant_x / determinant UpperCamelCase__ : Optional[int] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
713
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase__ = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str: for attribute in key.split('.'): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models UpperCamelCase__ : str = 'lm_head' UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_) if weight_type is not None: UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape else: UpperCamelCase__ : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": UpperCamelCase__ : Optional[Any] = value elif weight_type == "weight_g": UpperCamelCase__ : Union[str, Any] = value elif weight_type == "weight_v": UpperCamelCase__ : List[Any] = value elif weight_type == "bias": UpperCamelCase__ : Any = value else: UpperCamelCase__ : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]: UpperCamelCase__ : List[Any] = [] UpperCamelCase__ : int = fairseq_model.state_dict() UpperCamelCase__ : int = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase__ : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase__ : List[Any] = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]: UpperCamelCase__ : Any = True if "*" in mapped_key: UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2] UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_) if "weight_g" in name: UpperCamelCase__ : int = 'weight_g' elif "weight_v" in name: UpperCamelCase__ : Any = 'weight_v' elif "bias" in name: UpperCamelCase__ : Union[str, Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase__ : Any = 'weight' else: UpperCamelCase__ : Tuple = None set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) continue if not is_used: unused_weights.append(lowerCamelCase_) logger.warning(f'Unused weights: {unused_weights}') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1] UpperCamelCase__ : List[Any] = name.split('.') UpperCamelCase__ : Any = int(items[0]) UpperCamelCase__ : int = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) UpperCamelCase__ : Tuple = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) UpperCamelCase__ : int = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) UpperCamelCase__ : Optional[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) UpperCamelCase__ : List[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') else: unused_weights.append(lowerCamelCase_) @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple: if config_path is not None: UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_) else: UpperCamelCase__ : int = UniSpeechConfig() if is_finetuned: if dict_path: UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase__ : List[Any] = target_dict.pad_index UpperCamelCase__ : Dict = target_dict.bos_index UpperCamelCase__ : Union[str, Any] = target_dict.eos_index UpperCamelCase__ : Tuple = len(target_dict.symbols) UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json') if not os.path.isdir(lowerCamelCase_): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_)) return os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_) UpperCamelCase__ : Optional[int] = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase__ : Any = 42 UpperCamelCase__ : List[str] = 43 with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle: json.dump(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer( lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , ) UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_) processor.save_pretrained(lowerCamelCase_) UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_) else: UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_) if is_finetuned: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path}) else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) UpperCamelCase__ : int = model[0].eval() recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) hf_unispeech.save_pretrained(lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase__ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> list: UpperCamelCase__ : Tuple = len(lowerCamelCase_) UpperCamelCase__ : int = [[0] * n for i in range(lowerCamelCase_)] for i in range(lowerCamelCase_): UpperCamelCase__ : Dict = y_points[i] for i in range(2 , lowerCamelCase_): for j in range(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase__ : Tuple = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
714
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[str]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Union[str, Any] = 1 UpperCamelCase__ : Union[str, Any] = 3 UpperCamelCase__ : Dict = (32, 32) UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_) return image @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def __UpperCamelCase ( self : str): torch.manual_seed(0) UpperCamelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[Any]): def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict): class __lowercase : def __init__( self : List[Any]): UpperCamelCase__ : Optional[Any] = torch.ones([0]) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int): self.pixel_values.to(UpperCAmelCase_) return self return Out() return extract def __UpperCamelCase ( self : str): UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : Any = self.dummy_cond_unet UpperCamelCase__ : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , ) UpperCamelCase__ : List[str] = self.dummy_vae UpperCamelCase__ : str = self.dummy_text_encoder UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Tuple = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : int = self.dummy_cond_unet UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Optional[int] = self.dummy_vae UpperCamelCase__ : Optional[int] = self.dummy_text_encoder UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Dict = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) assert isinstance(pipe.scheduler , UpperCAmelCase_) assert pipe.safety_checker is None UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU') def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Dict = self.dummy_cond_unet UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Any = self.dummy_vae UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # put models in fp16 UpperCamelCase__ : Any = unet.half() UpperCamelCase__ : Tuple = vae.half() UpperCamelCase__ : Optional[int] = bert.half() # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[int] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger' UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) UpperCamelCase__ : Any = 4_003_660_346 UpperCamelCase__ : Any = 7 # without safety guidance (sld_guidance_scale = 0) UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : str = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # without safety guidance (strong configuration) UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Dict = output.images UpperCamelCase__ : str = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity' UpperCamelCase__ : Tuple = 2_734_971_755 UpperCamelCase__ : Tuple = 7 UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : int = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : int = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5') UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : int = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) UpperCamelCase__ : Any = 1_044_355_234 UpperCamelCase__ : Optional[int] = 12 UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7 UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : List[str] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
6
0
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __lowercase (unittest.TestCase ): @slow def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') model.to(UpperCAmelCase_) from datasets import load_dataset UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo') UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB') UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_) UpperCamelCase__ : Tuple = outputs.logits UpperCamelCase__ : str = torch.Size((1, 16)) self.assertEqual(logits.shape , UpperCAmelCase_) UpperCamelCase__ : Tuple = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
715
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowerCAmelCase__ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowerCAmelCase__ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __UpperCAmelCase ( ) -> Union[str, Any]: UpperCamelCase__ : Optional[Any] = ( list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1)) ) UpperCamelCase__ : List[Any] = bs[:] UpperCamelCase__ : Optional[int] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase_) cs.append(2**8 + n) n += 1 UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs] return dict(zip(lowerCamelCase_ , lowerCamelCase_)) def __UpperCAmelCase ( lowerCamelCase_) -> Tuple: UpperCamelCase__ : Any = set() UpperCamelCase__ : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char)) UpperCamelCase__ : str = char return pairs class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ): UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token super().__init__( errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , ) with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle: UpperCamelCase__ : Any = json.load(UpperCAmelCase_) UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()} UpperCamelCase__ : Any = errors # how to handle errors in decoding UpperCamelCase__ : Tuple = bytes_to_unicode() UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle: UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1] UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges] UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Dict = {} UpperCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def __UpperCamelCase ( self : Tuple): return len(self.encoder) def __UpperCamelCase ( self : Tuple): return dict(self.encoder , **self.added_tokens_encoder) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]): if token in self.cache: return self.cache[token] UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_) UpperCamelCase__ : int = get_pairs(UpperCAmelCase_) if not pairs: return token while True: UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf'))) if bigram not in self.bpe_ranks: break UpperCamelCase__, UpperCamelCase__ : Tuple = bigram UpperCamelCase__ : Dict = [] UpperCamelCase__ : Optional[int] = 0 while i < len(UpperCAmelCase_): try: UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) UpperCamelCase__ : Any = j if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_) UpperCamelCase__ : Dict = new_word if len(UpperCAmelCase_) == 1: break else: UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : List[Any] = word return word def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any): UpperCamelCase__ : Optional[Any] = [] for token in re.findall(self.pat , UpperCAmelCase_): UpperCamelCase__ : Optional[int] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' ')) return bpe_tokens def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token)) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]): return self.decoder.get(UpperCAmelCase_) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int): UpperCamelCase__ : int = ''.join(UpperCAmelCase_) UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): if not os.path.isdir(UpperCAmelCase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return UpperCamelCase__ : str = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n') UpperCamelCase__ : str = 0 with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!') UpperCamelCase__ : List[Any] = token_index writer.write(' '.join(UpperCAmelCase_) + '\n') index += 1 return vocab_file, merge_file def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_)) + [1] return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1] def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : Any = [self.sep_token_id] UpperCamelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]): UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()): UpperCamelCase__ : str = ' ' + text return (text, kwargs) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): return token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"): UpperCamelCase__ : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text) else: # Generated responses should contain them already. inputs.append(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : int = self.encode(UpperCAmelCase_) if len(UpperCAmelCase_) > self.model_max_length: UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :] logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.') return input_ids
6
0
'''simple docstring''' import os def __UpperCAmelCase ( lowerCamelCase_ = "input.txt") -> int: with open(os.path.join(os.path.dirname(lowerCamelCase_) , lowerCamelCase_)) as input_file: UpperCamelCase__ : Tuple = [ [int(lowerCamelCase_) for element in line.split(',')] for line in input_file.readlines() ] UpperCamelCase__ : Union[str, Any] = len(lowerCamelCase_) UpperCamelCase__ : str = len(matrix[0]) UpperCamelCase__ : str = [[-1 for _ in range(lowerCamelCase_)] for _ in range(lowerCamelCase_)] for i in range(lowerCamelCase_): UpperCamelCase__ : Dict = matrix[i][0] for j in range(1 , lowerCamelCase_): for i in range(lowerCamelCase_): UpperCamelCase__ : List[Any] = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , lowerCamelCase_): UpperCamelCase__ : Optional[int] = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j]) for i in range(rows - 2 , -1 , -1): UpperCamelCase__ : List[Any] = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j]) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums) if __name__ == "__main__": print(f'''{solution() = }''')
716
'''simple docstring''' import requests from bsa import BeautifulSoup def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str: UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser') UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_).find('span').text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
6
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
717
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __lowercase (unittest.TestCase ): @slow def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') model.to(UpperCAmelCase_) from datasets import load_dataset UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo') UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB') UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_) UpperCamelCase__ : Tuple = outputs.logits UpperCamelCase__ : str = torch.Size((1, 16)) self.assertEqual(logits.shape , UpperCAmelCase_) UpperCamelCase__ : Tuple = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
6
0
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowerCAmelCase__ = logging.get_logger(__name__) @dataclass class __lowercase (__lowerCamelCase ): _lowerCamelCase = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any]): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: UpperCamelCase__ : List[str] = deprecated_arg[3:] UpperCamelCase__ : str = not kwargs.pop(UpperCAmelCase_) logger.warning( F'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}') UpperCamelCase__ : Optional[int] = kwargs.pop('tpu_name' , self.tpu_name) UpperCamelCase__ : Union[str, Any] = kwargs.pop('device_idx' , self.device_idx) UpperCamelCase__ : int = kwargs.pop('eager_mode' , self.eager_mode) UpperCamelCase__ : str = kwargs.pop('use_xla' , self.use_xla) super().__init__(**UpperCAmelCase_) _lowerCamelCase = field( default=__lowerCamelCase , metadata={'''help''': '''Name of TPU'''} , ) _lowerCamelCase = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) _lowerCamelCase = field(default=__lowerCamelCase , metadata={'''help''': '''Benchmark models in eager model.'''} ) _lowerCamelCase = field( default=__lowerCamelCase , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def __UpperCamelCase ( self : Dict): requires_backends(self , ['tf']) UpperCamelCase__ : Dict = None if self.tpu: try: if self.tpu_name: UpperCamelCase__ : List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name) else: UpperCamelCase__ : List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: UpperCamelCase__ : int = None return tpu @cached_property def __UpperCamelCase ( self : Union[str, Any]): requires_backends(self , ['tf']) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu) UpperCamelCase__ : Optional[int] = tf.distribute.TPUStrategy(self._setup_tpu) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU') UpperCamelCase__ : int = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}') else: tf.config.set_visible_devices([] , 'GPU') # disable GPU UpperCamelCase__ : List[Any] = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}') return strategy @property def __UpperCamelCase ( self : Any): requires_backends(self , ['tf']) return self._setup_tpu is not None @property def __UpperCamelCase ( self : Any): requires_backends(self , ['tf']) return self._setup_strategy @property def __UpperCamelCase ( self : int): requires_backends(self , ['tf']) return tf.config.list_physical_devices('GPU') @property def __UpperCamelCase ( self : Optional[int]): requires_backends(self , ['tf']) if self.cuda: return len(self.gpu_list) return 0 @property def __UpperCamelCase ( self : List[Any]): return self.n_gpu > 0
718
'''simple docstring''' import argparse import struct import unittest class __lowercase : def __init__( self : Tuple , UpperCAmelCase_ : bytes): UpperCamelCase__ : Dict = data # Initialize hash values UpperCamelCase__ : Any = [ 0X6A_09E_667, 0XBB_67A_E85, 0X3C_6EF_372, 0XA5_4FF_53A, 0X51_0E5_27F, 0X9B_056_88C, 0X1F_83D_9AB, 0X5B_E0C_D19, ] # Initialize round constants UpperCamelCase__ : List[Any] = [ 0X42_8A2_F98, 0X71_374_491, 0XB5_C0F_BCF, 0XE9_B5D_BA5, 0X39_56C_25B, 0X59_F11_1F1, 0X92_3F8_2A4, 0XAB_1C5_ED5, 0XD8_07A_A98, 0X12_835_B01, 0X24_318_5BE, 0X55_0C7_DC3, 0X72_BE5_D74, 0X80_DEB_1FE, 0X9B_DC0_6A7, 0XC1_9BF_174, 0XE4_9B6_9C1, 0XEF_BE4_786, 0X0F_C19_DC6, 0X24_0CA_1CC, 0X2D_E92_C6F, 0X4A_748_4AA, 0X5C_B0A_9DC, 0X76_F98_8DA, 0X98_3E5_152, 0XA8_31C_66D, 0XB0_032_7C8, 0XBF_597_FC7, 0XC6_E00_BF3, 0XD5_A79_147, 0X06_CA6_351, 0X14_292_967, 0X27_B70_A85, 0X2E_1B2_138, 0X4D_2C6_DFC, 0X53_380_D13, 0X65_0A7_354, 0X76_6A0_ABB, 0X81_C2C_92E, 0X92_722_C85, 0XA2_BFE_8A1, 0XA8_1A6_64B, 0XC2_4B8_B70, 0XC7_6C5_1A3, 0XD1_92E_819, 0XD6_990_624, 0XF4_0E3_585, 0X10_6AA_070, 0X19_A4C_116, 0X1E_376_C08, 0X27_487_74C, 0X34_B0B_CB5, 0X39_1C0_CB3, 0X4E_D8A_A4A, 0X5B_9CC_A4F, 0X68_2E6_FF3, 0X74_8F8_2EE, 0X78_A56_36F, 0X84_C87_814, 0X8C_C70_208, 0X90_BEF_FFA, 0XA4_506_CEB, 0XBE_F9A_3F7, 0XC6_717_8F2, ] UpperCamelCase__ : Tuple = self.preprocessing(self.data) self.final_hash() @staticmethod def __UpperCamelCase ( UpperCAmelCase_ : bytes): UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64)) UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8)) return data + padding + big_endian_integer def __UpperCamelCase ( self : Union[str, Any]): # Convert into blocks of 64 bytes UpperCamelCase__ : int = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data) , 64) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_)) # add 48 0-ed integers words += [0] * 48 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes for index in range(0 , 64): if index > 15: # modify the zero-ed indexes at the end of the array UpperCamelCase__ : Dict = ( self.ror(words[index - 15] , 7) ^ self.ror(words[index - 15] , 18) ^ (words[index - 15] >> 3) ) UpperCamelCase__ : Tuple = ( self.ror(words[index - 2] , 17) ^ self.ror(words[index - 2] , 19) ^ (words[index - 2] >> 10) ) UpperCamelCase__ : int = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25) UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g) UpperCamelCase__ : List[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22) UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c) UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h] # Modify final values UpperCamelCase__ : Optional[Any] = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes) ] UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes]) def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int): return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations) class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : int): import hashlib UpperCamelCase__ : str = bytes('Test String' , 'utf-8') self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest()) def __UpperCAmelCase ( ) -> None: import doctest doctest.testmod() UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file') UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : str = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb') as f: UpperCamelCase__ : Any = f.read() else: UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8') print(SHAaaa(lowerCamelCase_).hash) if __name__ == "__main__": main()
6
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig lowerCAmelCase__ = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''tapas''' def __init__( self : Any , UpperCAmelCase_ : Any=30_522 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : List[Any]=1_024 , UpperCAmelCase_ : Dict=[3, 256, 256, 2, 256, 256, 10] , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : List[Any]=10.0 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Dict=1.0 , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[int]=1.0 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]="ratio" , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Dict , ): super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) UpperCamelCase__ : List[str] = vocab_size UpperCamelCase__ : str = hidden_size UpperCamelCase__ : str = num_hidden_layers UpperCamelCase__ : Tuple = num_attention_heads UpperCamelCase__ : str = hidden_act UpperCamelCase__ : Optional[int] = intermediate_size UpperCamelCase__ : Any = hidden_dropout_prob UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob UpperCamelCase__ : Union[str, Any] = max_position_embeddings UpperCamelCase__ : Tuple = type_vocab_sizes UpperCamelCase__ : Dict = initializer_range UpperCamelCase__ : Optional[int] = layer_norm_eps # Fine-tuning task hyperparameters UpperCamelCase__ : Optional[int] = positive_label_weight UpperCamelCase__ : str = num_aggregation_labels UpperCamelCase__ : Union[str, Any] = aggregation_loss_weight UpperCamelCase__ : List[str] = use_answer_as_supervision UpperCamelCase__ : List[str] = answer_loss_importance UpperCamelCase__ : Tuple = use_normalized_answer_loss UpperCamelCase__ : Optional[int] = huber_loss_delta UpperCamelCase__ : Any = temperature UpperCamelCase__ : int = aggregation_temperature UpperCamelCase__ : str = use_gumbel_for_cells UpperCamelCase__ : Dict = use_gumbel_for_aggregation UpperCamelCase__ : List[Any] = average_approximation_function UpperCamelCase__ : Dict = cell_selection_preference UpperCamelCase__ : Any = answer_loss_cutoff UpperCamelCase__ : str = max_num_rows UpperCamelCase__ : Optional[Any] = max_num_columns UpperCamelCase__ : Tuple = average_logits_per_cell UpperCamelCase__ : Optional[int] = select_one_column UpperCamelCase__ : Union[str, Any] = allow_empty_column_selection UpperCamelCase__ : Tuple = init_cell_selection_weights_to_zero UpperCamelCase__ : Dict = reset_position_index_per_cell UpperCamelCase__ : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters UpperCamelCase__ : Dict = aggregation_labels UpperCamelCase__ : Optional[int] = no_aggregation_label_index if isinstance(self.aggregation_labels , UpperCAmelCase_): UpperCamelCase__ : Optional[int] = {int(UpperCAmelCase_): v for k, v in aggregation_labels.items()}
719
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants lowerCAmelCase__ = 300 # TEMPERATURE (unit = K) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float: if donor_conc <= 0: raise ValueError('Donor concentration should be positive') elif acceptor_conc <= 0: raise ValueError('Acceptor concentration should be positive') elif intrinsic_conc <= 0: raise ValueError('Intrinsic concentration should be positive') elif donor_conc <= intrinsic_conc: raise ValueError( 'Donor concentration should be greater than intrinsic concentration') elif acceptor_conc <= intrinsic_conc: raise ValueError( 'Acceptor concentration should be greater than intrinsic concentration') else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[str]: UpperCamelCase__ : Dict = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : str = 0 while b > 0: if b & 1: UpperCamelCase__ : Tuple = ((res % c) + (a % c)) % c a += a b >>= 1 return res
720
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]: UpperCamelCase__ : int = [] if isinstance(lowerCamelCase_ , lowerCamelCase_): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , (list, tuple)): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , torch.Tensor): shapes.append(tree.shape) else: raise ValueError('Not supported') return shapes @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]: UpperCamelCase__ : int = [] for d in reversed(lowerCamelCase_): idx.append(flat_idx % d) UpperCamelCase__ : Any = flat_idx // d return tuple(reversed(lowerCamelCase_)) @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]: # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(lowerCamelCase_) -> None: UpperCamelCase__ : Tuple = True for i in range(len(lowerCamelCase_)): UpperCamelCase__ : List[Any] = -1 * (i + 1) l[reversed_idx] &= tally UpperCamelCase__ : Optional[Any] = l[reversed_idx] if start_edges is None: UpperCamelCase__ : int = [s == 0 for s in start] reduce_edge_list(lowerCamelCase_) if end_edges is None: UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)] reduce_edge_list(lowerCamelCase_) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase_) == 0: return [()] elif len(lowerCamelCase_) == 1: return [(slice(start[0] , end[0] + 1),)] UpperCamelCase__ : List[Tuple[slice, ...]] = [] UpperCamelCase__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase_ , lowerCamelCase_): if s == e: path_list.append(slice(lowerCamelCase_ , s + 1)) else: break UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_) UpperCamelCase__ : Dict = len(lowerCamelCase_) # start == end, and we're done if divergence_idx == len(lowerCamelCase_): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : str = start[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , sdi + 1),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , )) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : Optional[int] = end[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , edi + 1),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , )) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),)) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),)) slices.extend(lower()) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper()) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),)) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper()) UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),)) slices.extend(lower()) return slices @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor: UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims] UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_)) # _get_minimal_slice_set is inclusive UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_)) # Get an ordered list of slices to perform UpperCamelCase__ : int = _get_minimal_slice_set( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) UpperCamelCase__ : List[Any] = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any: if not (len(lowerCamelCase_) > 0): raise ValueError('Must provide at least one input') UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)] UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)]) def _prep_inputs(lowerCamelCase_) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims]) == no_batch_dims: UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:]) else: UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) return t UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_) UpperCamelCase__ : int = None if _out is not None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out) UpperCamelCase__ : Dict = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase_) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[Any] = prepped_outputs for _ in range(lowerCamelCase_): # Chunk the input if not low_mem: UpperCamelCase__ : str = _select_chunk else: UpperCamelCase__ : List[Any] = partial( _chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , ) UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_) # Run the layer on the chunk UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_) # Allocate space for the output if out is None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_) # Put the chunk in its pre-allocated space if isinstance(lowerCamelCase_ , lowerCamelCase_): def assign(lowerCamelCase_ , lowerCamelCase_) -> None: for k, v in da.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_): assign(lowerCamelCase_ , da[k]) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCamelCase__ : List[str] = da[k] assign(lowerCamelCase_ , lowerCamelCase_) elif isinstance(lowerCamelCase_ , lowerCamelCase_): for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCamelCase__ : int = xa elif isinstance(lowerCamelCase_ , torch.Tensor): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCamelCase__ : Dict = output_chunk else: raise ValueError('Not supported') i += chunk_size UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_) return out class __lowercase : def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ): UpperCamelCase__ : str = max_chunk_size UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[tuple] = None def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int): logging.info('Tuning chunk size...') if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)] UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size] UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(UpperCAmelCase_ : int) -> bool: try: with torch.no_grad(): fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_) return True except RuntimeError: return False UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1 while i > min_viable_chunk_size_index: UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i]) if not viable: UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2 else: UpperCamelCase__ : Optional[int] = i UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2 return candidates[min_viable_chunk_size_index] def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable): UpperCamelCase__ : List[str] = True for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_): assert type(UpperCAmelCase_) == type(UpperCAmelCase_) if isinstance(UpperCAmelCase_ , (list, tuple)): consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) else: consistent &= aa == aa return consistent def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ): UpperCamelCase__ : List[Any] = True UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data) == len(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_) else: # Otherwise, we can reuse the precomputed value UpperCamelCase__ : Optional[int] = False if not consistent: UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
6
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=False) -> Tuple: UpperCamelCase__ : Optional[int] = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight')) rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias')) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight')) rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias')) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias')) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'deit.embeddings.cls_token'), ('dist_token', 'deit.embeddings.distillation_token'), ('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'deit.embeddings.position_embeddings'), ]) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ]) # if just the base model, we should remove "deit" from all keys that start with "deit" UpperCamelCase__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('deit') else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('norm.weight', 'deit.layernorm.weight'), ('norm.bias', 'deit.layernorm.bias'), ('head.weight', 'cls_classifier.weight'), ('head.bias', 'cls_classifier.bias'), ('head_dist.weight', 'distillation_classifier.weight'), ('head_dist.bias', 'distillation_classifier.bias'), ]) return rename_keys def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any: for i in range(config.num_hidden_layers): if base_model: UpperCamelCase__ : int = '' else: UpperCamelCase__ : int = 'deit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ : Optional[int] = state_dict.pop(f'blocks.{i}.attn.qkv.weight') UpperCamelCase__ : Tuple = state_dict.pop(f'blocks.{i}.attn.qkv.bias') # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ : str = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase__ : List[str] = in_proj_bias[: config.hidden_size] UpperCamelCase__ : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ : Optional[Any] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : Any = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int: UpperCamelCase__ : Union[str, Any] = dct.pop(lowerCamelCase_) UpperCamelCase__ : Optional[Any] = val def __UpperCAmelCase ( ) -> str: UpperCamelCase__ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase__ : List[str] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw) return im @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[Any]: UpperCamelCase__ : Optional[int] = DeiTConfig() # all deit models have fine-tuned heads UpperCamelCase__ : Tuple = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size UpperCamelCase__ : Tuple = 1_000 UpperCamelCase__ : List[Any] = 'huggingface/label-files' UpperCamelCase__ : Any = 'imagenet-1k-id2label.json' UpperCamelCase__ : int = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset') , 'r')) UpperCamelCase__ : str = {int(lowerCamelCase_): v for k, v in idalabel.items()} UpperCamelCase__ : Tuple = idalabel UpperCamelCase__ : int = {v: k for k, v in idalabel.items()} UpperCamelCase__ : int = int(deit_name[-6:-4]) UpperCamelCase__ : int = int(deit_name[-3:]) # size of the architecture if deit_name[9:].startswith('tiny'): UpperCamelCase__ : Union[str, Any] = 192 UpperCamelCase__ : int = 768 UpperCamelCase__ : List[str] = 12 UpperCamelCase__ : Union[str, Any] = 3 elif deit_name[9:].startswith('small'): UpperCamelCase__ : str = 384 UpperCamelCase__ : Union[str, Any] = 1_536 UpperCamelCase__ : List[str] = 12 UpperCamelCase__ : List[Any] = 6 if deit_name[9:].startswith('base'): pass elif deit_name[4:].startswith('large'): UpperCamelCase__ : List[Any] = 1_024 UpperCamelCase__ : Union[str, Any] = 4_096 UpperCamelCase__ : List[Any] = 24 UpperCamelCase__ : str = 16 # load original model from timm UpperCamelCase__ : Optional[Any] = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCamelCase__ : List[Any] = timm_model.state_dict() UpperCamelCase__ : List[Any] = create_rename_keys(lowerCamelCase_ , lowerCamelCase_) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) # load HuggingFace model UpperCamelCase__ : List[str] = DeiTForImageClassificationWithTeacher(lowerCamelCase_).eval() model.load_state_dict(lowerCamelCase_) # Check outputs on an image, prepared by DeiTImageProcessor UpperCamelCase__ : Dict = int( (256 / 224) * config.image_size) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 UpperCamelCase__ : int = DeiTImageProcessor(size=lowerCamelCase_ , crop_size=config.image_size) UpperCamelCase__ : List[Any] = image_processor(images=prepare_img() , return_tensors='pt') UpperCamelCase__ : Optional[Any] = encoding['pixel_values'] UpperCamelCase__ : str = model(lowerCamelCase_) UpperCamelCase__ : List[str] = timm_model(lowerCamelCase_) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCamelCase_ , outputs.logits , atol=1e-3) Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_) print(f'Saving model {deit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowerCAmelCase__ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
721
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : int = tempfile.mkdtemp() # fmt: off UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'} UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(UpperCAmelCase_) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(UpperCAmelCase_)) UpperCamelCase__ : Dict = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]): return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : str): shutil.rmtree(self.tmpdirname) def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Union[str, Any] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer() UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_slow.save_pretrained(self.tmpdirname) UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_) UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_fast.save_pretrained(self.tmpdirname) UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : List[str]): UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0) UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , UpperCAmelCase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : int = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : int = self.prepare_image_inputs() UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np') UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Any = 'lower newer' UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def __UpperCamelCase ( self : int): UpperCamelCase__ : Optional[int] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'lower newer' UpperCamelCase__ : List[Any] = self.prepare_image_inputs() UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_): processor() def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : str): UpperCamelCase__ : Union[str, Any] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'lower newer' UpperCamelCase__ : Optional[int] = self.prepare_image_inputs() UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
6
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __lowercase (__lowerCamelCase ): _lowerCamelCase = 42 class __lowercase (__lowerCamelCase , __lowerCamelCase ): @register_to_config def __init__( self : List[Any] , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCAmelCase_ : Tuple[int] = (64,) , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "silu" , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : int = 256 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : float = 0.1_82_15 , UpperCAmelCase_ : str = "group" , ): super().__init__() # pass init params to Encoder UpperCamelCase__ : List[Any] = Encoder( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , down_block_types=UpperCAmelCase_ , block_out_channels=UpperCAmelCase_ , layers_per_block=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , norm_num_groups=UpperCAmelCase_ , double_z=UpperCAmelCase_ , ) UpperCamelCase__ : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels UpperCamelCase__ : str = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , 1) UpperCamelCase__ : Tuple = VectorQuantizer(UpperCAmelCase_ , UpperCAmelCase_ , beta=0.25 , remap=UpperCAmelCase_ , sane_index_shape=UpperCAmelCase_) UpperCamelCase__ : Any = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , 1) # pass init params to Decoder UpperCamelCase__ : List[str] = Decoder( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , up_block_types=UpperCAmelCase_ , block_out_channels=UpperCAmelCase_ , layers_per_block=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , norm_num_groups=UpperCAmelCase_ , norm_type=UpperCAmelCase_ , ) @apply_forward_hook def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = True): UpperCamelCase__ : Dict = self.encoder(UpperCAmelCase_) UpperCamelCase__ : Dict = self.quant_conv(UpperCAmelCase_) if not return_dict: return (h,) return VQEncoderOutput(latents=UpperCAmelCase_) @apply_forward_hook def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True): # also go through quantization layer if not force_not_quantize: UpperCamelCase__ : List[str] = self.quantize(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = h UpperCamelCase__ : Union[str, Any] = self.post_quant_conv(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.decoder(UpperCAmelCase_ , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : bool = True): UpperCamelCase__ : str = sample UpperCamelCase__ : int = self.encode(UpperCAmelCase_).latents UpperCamelCase__ : Union[str, Any] = self.decode(UpperCAmelCase_).sample if not return_dict: return (dec,) return DecoderOutput(sample=UpperCAmelCase_)
700
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None: UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_) for k, v in tqdm(state_dict.items()): if not isinstance(lowerCamelCase_ , torch.Tensor): raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin') UpperCamelCase__ : int = v.half() if save_path is None: # overwrite src_path UpperCamelCase__ : List[Any] = src_path torch.save(lowerCamelCase_ , lowerCamelCase_) if __name__ == "__main__": fire.Fire(convert)
6
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4') UpperCamelCase__ : Optional[int] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) sd_pipe.set_scheduler('sample_euler') UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : List[Any] = torch.manual_seed(0) UpperCamelCase__ : Dict = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np') UpperCamelCase__ : Dict = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase__ : List[str] = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : str): UpperCamelCase__ : Union[str, Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base') UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) sd_pipe.set_scheduler('sample_euler') UpperCamelCase__ : Tuple = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Union[str, Any] = torch.manual_seed(0) UpperCamelCase__ : Any = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np') UpperCamelCase__ : Optional[int] = output.images UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase__ : str = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1 def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base') UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) sd_pipe.set_scheduler('sample_dpmpp_2m') UpperCamelCase__ : int = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Union[str, Any] = torch.manual_seed(0) UpperCamelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=UpperCAmelCase_ , ) UpperCamelCase__ : List[Any] = output.images UpperCamelCase__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase__ : Union[str, Any] = np.array( [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
701
'''simple docstring''' import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'nvidia/segformer-b0-finetuned-ade-512-512': ( 'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''segformer''' def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ): super().__init__(**UpperCAmelCase_) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , ) UpperCamelCase__ : List[Any] = num_channels UpperCamelCase__ : Any = num_encoder_blocks UpperCamelCase__ : Dict = depths UpperCamelCase__ : int = sr_ratios UpperCamelCase__ : str = hidden_sizes UpperCamelCase__ : List[str] = patch_sizes UpperCamelCase__ : Optional[int] = strides UpperCamelCase__ : Dict = mlp_ratios UpperCamelCase__ : List[str] = num_attention_heads UpperCamelCase__ : int = hidden_act UpperCamelCase__ : Any = hidden_dropout_prob UpperCamelCase__ : str = attention_probs_dropout_prob UpperCamelCase__ : List[str] = classifier_dropout_prob UpperCamelCase__ : List[Any] = initializer_range UpperCamelCase__ : Union[str, Any] = drop_path_rate UpperCamelCase__ : int = layer_norm_eps UpperCamelCase__ : Dict = decoder_hidden_size UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_) UpperCamelCase__ : List[str] = semantic_loss_ignore_index class __lowercase (__lowerCamelCase ): _lowerCamelCase = version.parse('''1.11''' ) @property def __UpperCamelCase ( self : Optional[Any]): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def __UpperCamelCase ( self : Optional[Any]): return 1e-4 @property def __UpperCamelCase ( self : Any): return 12
6
0
'''simple docstring''' import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''facebook/bart-large-mnli''' _lowerCamelCase = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. ''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) _lowerCamelCase = '''text_classifier''' _lowerCamelCase = AutoTokenizer _lowerCamelCase = AutoModelForSequenceClassification _lowerCamelCase = ['''text''', ['''text''']] _lowerCamelCase = ['''text'''] def __UpperCamelCase ( self : List[str]): super().setup() UpperCamelCase__ : List[str] = self.model.config UpperCamelCase__ : List[str] = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('entail'): UpperCamelCase__ : int = int(UpperCAmelCase_) if self.entailment_id == -1: raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.') def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int): UpperCamelCase__ : List[Any] = labels return self.pre_processor( [text] * len(UpperCAmelCase_) , [F'This example is {label}' for label in labels] , return_tensors='pt' , padding='max_length' , ) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Tuple): UpperCamelCase__ : Tuple = outputs.logits UpperCamelCase__ : Union[str, Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
702
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]: return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)] if __name__ == "__main__": from doctest import testmod testmod()
6
0
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __lowercase (yaml.SafeLoader ): def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int]): UpperCamelCase__ : Union[str, Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] UpperCamelCase__ : str = [tuple(UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else key for key in keys] UpperCamelCase__ : int = Counter(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}') def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False): UpperCamelCase__ : Union[str, Any] = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_) self._check_no_duplicates_on_constructed_node(UpperCAmelCase_) return mapping def __UpperCAmelCase ( lowerCamelCase_) -> Tuple[Optional[str], str]: UpperCamelCase__ : str = list(readme_content.splitlines()) if full_content and full_content[0] == "---" and "---" in full_content[1:]: UpperCamelCase__ : Dict = full_content[1:].index('---') + 1 UpperCamelCase__ : Any = '\n'.join(full_content[1:sep_idx]) return yamlblock, "\n".join(full_content[sep_idx + 1 :]) return None, "\n".join(lowerCamelCase_) class __lowercase (__lowerCamelCase ): # class attributes _lowerCamelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def __UpperCamelCase ( cls : List[str] , UpperCAmelCase_ : Path): with open(UpperCAmelCase_ , encoding='utf-8') as readme_file: UpperCamelCase__ : Any = _split_yaml_from_readme(readme_file.read()) if yaml_string is not None: return cls.from_yaml_string(UpperCAmelCase_) else: return cls() def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Path): if path.exists(): with open(UpperCAmelCase_ , encoding='utf-8') as readme_file: UpperCamelCase__ : Tuple = readme_file.read() else: UpperCamelCase__ : List[Any] = None UpperCamelCase__ : List[Any] = self._to_readme(UpperCAmelCase_) with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as readme_file: readme_file.write(UpperCAmelCase_) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Optional[str] = None): if readme_content is not None: UpperCamelCase__ : Any = _split_yaml_from_readme(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content else: UpperCamelCase__ : List[str] = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : str): UpperCamelCase__ : Any = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader) or {} # Convert the YAML keys to DatasetMetadata fields UpperCamelCase__ : int = { (key.replace('-' , '_') if key.replace('-' , '_') in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCAmelCase_) def __UpperCamelCase ( self : Any): return yaml.safe_dump( { (key.replace('_' , '-') if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='utf-8' , ).decode('utf-8') lowerCAmelCase__ = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser lowerCAmelCase__ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') lowerCAmelCase__ = ap.parse_args() lowerCAmelCase__ = Path(args.readme_filepath) lowerCAmelCase__ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
703
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def __UpperCAmelCase ( lowerCamelCase_) -> float: return np.dot(lowerCamelCase_ , lowerCamelCase_) class __lowercase : def __init__( self : Tuple , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ): UpperCamelCase__ : Union[str, Any] = regularization UpperCamelCase__ : Optional[int] = gamma if kernel == "linear": UpperCamelCase__ : List[str] = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma') if not isinstance(self.gamma , (float, int)): raise ValueError('gamma must be float or int') if not self.gamma > 0: raise ValueError('gamma must be > 0') UpperCamelCase__ : Union[str, Any] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}' raise ValueError(UpperCAmelCase_) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Any = observations UpperCamelCase__ : Tuple = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((UpperCamelCase__), ) : Optional[Any] = np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: UpperCamelCase__ : Union[str, Any] = 0 ((UpperCamelCase__), ) : int = np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0) UpperCamelCase__ : Dict = Bounds(0 , self.regularization) UpperCamelCase__ : Any = minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x UpperCamelCase__ : str = l_star # calculating mean offset of separation plane to points UpperCamelCase__ : Any = 0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) UpperCamelCase__ : List[str] = s / n def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Optional[int] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
6
0
import math def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> int: UpperCamelCase__ : List[str] = len(lowerCamelCase_) UpperCamelCase__ : Optional[int] = int(math.floor(math.sqrt(lowerCamelCase_))) UpperCamelCase__ : List[str] = 0 while arr[min(lowerCamelCase_ , lowerCamelCase_) - 1] < x: UpperCamelCase__ : Tuple = step step += int(math.floor(math.sqrt(lowerCamelCase_))) if prev >= n: return -1 while arr[prev] < x: UpperCamelCase__ : int = prev + 1 if prev == min(lowerCamelCase_ , lowerCamelCase_): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": lowerCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() lowerCAmelCase__ = [int(item) for item in user_input.split(',')] lowerCAmelCase__ = int(input('Enter the number to be searched:\n')) lowerCAmelCase__ = jump_search(arr, x) if res == -1: print('Number not found!') else: print(f'''Number {x} is at index {res}''')
704
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_) -> Any: UpperCamelCase__ : Dict = DPTConfig() if "large" in checkpoint_url: UpperCamelCase__ : List[str] = 1_024 UpperCamelCase__ : List[str] = 4_096 UpperCamelCase__ : Optional[int] = 24 UpperCamelCase__ : List[str] = 16 UpperCamelCase__ : List[str] = [5, 11, 17, 23] UpperCamelCase__ : str = [256, 512, 1_024, 1_024] UpperCamelCase__ : Union[str, Any] = (1, 384, 384) if "ade" in checkpoint_url: UpperCamelCase__ : int = True UpperCamelCase__ : Optional[Any] = 150 UpperCamelCase__ : int = 'huggingface/label-files' UpperCamelCase__ : List[Any] = 'ade20k-id2label.json' UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r')) UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()} UpperCamelCase__ : Union[str, Any] = idalabel UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()} UpperCamelCase__ : Any = [1, 150, 480, 480] return config, expected_shape def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder') if "pretrained.model" in name: UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings') if "patch_embed" in name: UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings') if "pos_embed" in name: UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings') if "attn.proj" in name: UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense') if "proj" in name and "project" not in name: UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection') if "blocks" in name: UpperCamelCase__ : int = name.replace('blocks' , 'layer') if "mlp.fc1" in name: UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense') if "mlp.fc2" in name: UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense') if "norm1" in name: UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before') if "norm2" in name: UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after') if "scratch.output_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head') if "scratch" in name: UpperCamelCase__ : int = name.replace('scratch' , 'neck') if "layer1_rn" in name: UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0') if "layer2_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1') if "layer3_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2') if "layer4_rn" in name: UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3') if "refinenet" in name: UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}') if "out_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection') if "resConfUnit1" in name: UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1') if "resConfUnit2" in name: UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2') if "conv1" in name: UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1') if "conv2" in name: UpperCamelCase__ : int = name.replace('conv2' , 'convolution2') # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0') if "pretrained.act_postprocess2.0.project.0" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0') if "pretrained.act_postprocess3.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0') if "pretrained.act_postprocess4.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0') # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection') if "pretrained.act_postprocess1.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize') if "pretrained.act_postprocess2.3" in name: UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection') if "pretrained.act_postprocess2.4" in name: UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize') if "pretrained.act_postprocess3.3" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection') if "pretrained.act_postprocess4.3" in name: UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection') if "pretrained.act_postprocess4.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize') if "pretrained" in name: UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt') if "bn" in name: UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm') if "head" in name: UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head') if "encoder.norm" in name: UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm') if "auxlayer" in name: UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head') return name def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any: for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight') UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias') # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :] UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size] UpperCamelCase__ : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw) return im @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_) # load original state_dict from URL UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu') # remove certain keys remove_ignore_keys_(lowerCamelCase_) # rename keys for key in state_dict.copy().keys(): UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_) UpperCamelCase__ : List[str] = val # read in qkv matrices read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_) # load HuggingFace model UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_) model.load_state_dict(lowerCamelCase_) model.eval() # Check outputs on an image UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384 UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_) UpperCamelCase__ : int = prepare_img() UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt') # forward pass UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth # Assert logits UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]]) if "ade" in checkpoint_url: UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]]) assert outputs.shape == torch.Size(lowerCamelCase_) assert ( torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_) ) Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase_) if push_to_hub: print('Pushing model to hub...') model.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) lowerCAmelCase__ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
6
0
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets lowerCAmelCase__ = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' lowerCAmelCase__ = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n' lowerCAmelCase__ = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Union[str, Any]: if label_map is not None: for old_id, new_id in label_map.items(): UpperCamelCase__ : Optional[Any] = new_id # turn into Numpy arrays UpperCamelCase__ : str = np.array(lowerCamelCase_) UpperCamelCase__ : Dict = np.array(lowerCamelCase_) if reduce_labels: UpperCamelCase__ : List[str] = 255 UpperCamelCase__ : Union[str, Any] = label - 1 UpperCamelCase__ : Optional[Any] = 255 UpperCamelCase__ : Dict = label != ignore_index UpperCamelCase__ : List[Any] = np.not_equal(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : int = pred_label[mask] UpperCamelCase__ : Optional[Any] = np.array(lowerCamelCase_)[mask] UpperCamelCase__ : Union[str, Any] = pred_label[pred_label == label] UpperCamelCase__ : int = np.histogram(lowerCamelCase_ , bins=lowerCamelCase_ , range=(0, num_labels - 1))[0] UpperCamelCase__ : Optional[int] = np.histogram(lowerCamelCase_ , bins=lowerCamelCase_ , range=(0, num_labels - 1))[0] UpperCamelCase__ : Optional[int] = np.histogram(lowerCamelCase_ , bins=lowerCamelCase_ , range=(0, num_labels - 1))[0] UpperCamelCase__ : int = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> List[Any]: UpperCamelCase__ : Optional[int] = np.zeros((num_labels,) , dtype=np.floataa) UpperCamelCase__ : Dict = np.zeros((num_labels,) , dtype=np.floataa) UpperCamelCase__ : int = np.zeros((num_labels,) , dtype=np.floataa) UpperCamelCase__ : Tuple = np.zeros((num_labels,) , dtype=np.floataa) for result, gt_seg_map in zip(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase__ : Tuple = intersect_and_union( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Optional[Any]: UpperCamelCase__ : Dict = total_intersect_and_union( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) # compute metrics UpperCamelCase__ : Optional[int] = {} UpperCamelCase__ : List[Any] = total_area_intersect.sum() / total_area_label.sum() UpperCamelCase__ : Tuple = total_area_intersect / total_area_union UpperCamelCase__ : str = total_area_intersect / total_area_label UpperCamelCase__ : Union[str, Any] = np.nanmean(lowerCamelCase_) UpperCamelCase__ : List[Any] = np.nanmean(lowerCamelCase_) UpperCamelCase__ : Any = all_acc UpperCamelCase__ : Optional[int] = iou UpperCamelCase__ : Union[str, Any] = acc if nan_to_num is not None: UpperCamelCase__ : Optional[int] = {metric: np.nan_to_num(lowerCamelCase_ , nan=lowerCamelCase_) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase (datasets.Metric ): def __UpperCamelCase ( self : Optional[int]): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))), }) , reference_urls=[ 'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ): UpperCamelCase__ : Tuple = mean_iou( results=UpperCAmelCase_ , gt_seg_maps=UpperCAmelCase_ , num_labels=UpperCAmelCase_ , ignore_index=UpperCAmelCase_ , nan_to_num=UpperCAmelCase_ , label_map=UpperCAmelCase_ , reduce_labels=UpperCAmelCase_ , ) return iou_result
705
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ): UpperCamelCase__ : Tuple = parent UpperCamelCase__ : List[str] = batch_size UpperCamelCase__ : Optional[Any] = image_size UpperCamelCase__ : Optional[Any] = patch_size UpperCamelCase__ : List[str] = num_channels UpperCamelCase__ : Union[str, Any] = is_training UpperCamelCase__ : int = use_labels UpperCamelCase__ : Optional[int] = hidden_size UpperCamelCase__ : Any = num_hidden_layers UpperCamelCase__ : str = num_attention_heads UpperCamelCase__ : str = intermediate_size UpperCamelCase__ : Union[str, Any] = hidden_act UpperCamelCase__ : Optional[int] = hidden_dropout_prob UpperCamelCase__ : Tuple = attention_probs_dropout_prob UpperCamelCase__ : Any = type_sequence_label_size UpperCamelCase__ : int = initializer_range UpperCamelCase__ : Optional[int] = mask_ratio UpperCamelCase__ : int = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase__ : str = (image_size // patch_size) ** 2 UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCamelCase__ : List[str] = None if self.use_labels: UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCamelCase__ : Any = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : List[Any]): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]): UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple): UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Dict = model(UpperCAmelCase_) UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2 UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) # test greyscale images UpperCamelCase__ : List[Any] = 1 UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[str] = self.prepare_config_and_inputs() UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs UpperCamelCase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): _lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () _lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : List[str] = ViTMAEModelTester(self) UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37) def __UpperCamelCase ( self : Any): self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds') def __UpperCamelCase ( self : Tuple): pass def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) UpperCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear)) def __UpperCamelCase ( self : List[str]): UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_) UpperCamelCase__ : int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase__ : Any = [*signature.parameters.keys()] UpperCamelCase__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_) def __UpperCamelCase ( self : str): UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]): # make masks reproducible np.random.seed(2) UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2) UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase__ : List[str] = pt_noise super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) UpperCamelCase__ : Dict = outputs[0].cpu().numpy() UpperCamelCase__ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_) model.to(UpperCAmelCase_) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) # Make sure we don't have nans UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy() UpperCamelCase__ : Any = 0 UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(UpperCAmelCase_ , 1e-5) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Tuple): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Optional[int]): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Tuple): pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load') def __UpperCamelCase ( self : Tuple): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def __UpperCamelCase ( self : Optional[int]): pass @slow def __UpperCamelCase ( self : Optional[Any]): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_torch @require_vision class __lowercase (unittest.TestCase ): @cached_property def __UpperCamelCase ( self : int): return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None @slow def __UpperCamelCase ( self : str): # make random mask reproducible across the PT and TF model np.random.seed(2) UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.default_image_processor UpperCamelCase__ : Dict = prepare_img() UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase__ : Union[str, Any] = ViTMAEConfig() UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches)) # forward pass with torch.no_grad(): UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_)) # verify the logits UpperCamelCase__ : Tuple = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape , UpperCAmelCase_) UpperCamelCase__ : Any = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]]) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
6
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''imagegpt''' _lowerCamelCase = ['''past_key_values'''] _lowerCamelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Tuple , UpperCAmelCase_ : Dict=512 + 1 , UpperCAmelCase_ : int=32 * 32 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : List[Any]=24 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]="quick_gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=1e-5 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Any=False , **UpperCAmelCase_ : List[str] , ): UpperCamelCase__ : int = vocab_size UpperCamelCase__ : int = n_positions UpperCamelCase__ : List[str] = n_embd UpperCamelCase__ : Tuple = n_layer UpperCamelCase__ : Dict = n_head UpperCamelCase__ : Any = n_inner UpperCamelCase__ : Union[str, Any] = activation_function UpperCamelCase__ : Tuple = resid_pdrop UpperCamelCase__ : str = embd_pdrop UpperCamelCase__ : List[Any] = attn_pdrop UpperCamelCase__ : str = layer_norm_epsilon UpperCamelCase__ : Optional[int] = initializer_range UpperCamelCase__ : Dict = scale_attn_weights UpperCamelCase__ : Dict = use_cache UpperCamelCase__ : Union[str, Any] = scale_attn_by_inverse_layer_idx UpperCamelCase__ : int = reorder_and_upcast_attn UpperCamelCase__ : List[Any] = tie_word_embeddings super().__init__(tie_word_embeddings=UpperCAmelCase_ , **UpperCAmelCase_) class __lowercase (__lowerCamelCase ): @property def __UpperCamelCase ( self : Optional[Any]): return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ]) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : "FeatureExtractionMixin" , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : int = 32 , ): UpperCamelCase__ : str = self._generate_dummy_images(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : List[str] = dict(preprocessor(images=UpperCAmelCase_ , return_tensors=UpperCAmelCase_)) return inputs
706
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowercase (metaclass=__lowerCamelCase ): _lowerCamelCase = ['''torch''', '''scipy'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int): requires_backends(self , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]): requires_backends(cls , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any): requires_backends(cls , ['torch', 'scipy'])
6
0
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : int = 384 if "tiny" in model_name: UpperCamelCase__ : List[str] = [3, 3, 9, 3] UpperCamelCase__ : int = [96, 192, 384, 768] if "small" in model_name: UpperCamelCase__ : Tuple = [3, 3, 27, 3] UpperCamelCase__ : List[Any] = [96, 192, 384, 768] if "base" in model_name: UpperCamelCase__ : List[str] = [3, 3, 27, 3] UpperCamelCase__ : Any = [128, 256, 512, 1_024] UpperCamelCase__ : Optional[int] = 512 if "large" in model_name: UpperCamelCase__ : str = [3, 3, 27, 3] UpperCamelCase__ : str = [192, 384, 768, 1_536] UpperCamelCase__ : str = 768 if "xlarge" in model_name: UpperCamelCase__ : Optional[int] = [3, 3, 27, 3] UpperCamelCase__ : Optional[int] = [256, 512, 1_024, 2_048] UpperCamelCase__ : List[str] = 1_024 # set label information UpperCamelCase__ : Optional[Any] = 150 UpperCamelCase__ : Union[str, Any] = 'huggingface/label-files' UpperCamelCase__ : Tuple = 'ade20k-id2label.json' UpperCamelCase__ : List[Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset') , 'r')) UpperCamelCase__ : Union[str, Any] = {int(lowerCamelCase_): v for k, v in idalabel.items()} UpperCamelCase__ : int = {v: k for k, v in idalabel.items()} UpperCamelCase__ : int = ConvNextConfig( depths=lowerCamelCase_ , hidden_sizes=lowerCamelCase_ , out_features=['stage1', 'stage2', 'stage3', 'stage4']) UpperCamelCase__ : List[Any] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]: UpperCamelCase__ : str = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight')) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias')) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight')) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias')) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter')) rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight')) rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias')) rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight')) rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias')) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight')) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias')) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight')) rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias')) if i > 0: rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight')) rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias')) rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight')) rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias')) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight')) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias')) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ]) # fmt: on return rename_keys def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : Tuple = dct.pop(lowerCamelCase_) UpperCamelCase__ : Optional[Any] = val def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]: UpperCamelCase__ : Optional[int] = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } UpperCamelCase__ : Optional[int] = model_name_to_url[model_name] UpperCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')['state_dict'] UpperCamelCase__ : Tuple = get_upernet_config(lowerCamelCase_) UpperCamelCase__ : List[Any] = UperNetForSemanticSegmentation(lowerCamelCase_) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): UpperCamelCase__ : Optional[Any] = state_dict.pop(lowerCamelCase_) if "bn" in key: UpperCamelCase__ : Tuple = key.replace('bn' , 'batch_norm') UpperCamelCase__ : List[Any] = val # rename keys UpperCamelCase__ : Union[str, Any] = create_rename_keys(lowerCamelCase_) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) model.load_state_dict(lowerCamelCase_) # verify on image UpperCamelCase__ : Union[str, Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' UpperCamelCase__ : Dict = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw).convert('RGB') UpperCamelCase__ : List[Any] = SegformerImageProcessor() UpperCamelCase__ : Tuple = processor(lowerCamelCase_ , return_tensors='pt').pixel_values with torch.no_grad(): UpperCamelCase__ : Union[str, Any] = model(lowerCamelCase_) if model_name == "upernet-convnext-tiny": UpperCamelCase__ : List[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]) elif model_name == "upernet-convnext-small": UpperCamelCase__ : str = torch.tensor( [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]]) elif model_name == "upernet-convnext-base": UpperCamelCase__ : Optional[int] = torch.tensor( [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]]) elif model_name == "upernet-convnext-large": UpperCamelCase__ : Union[str, Any] = torch.tensor( [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]]) elif model_name == "upernet-convnext-xlarge": UpperCamelCase__ : Optional[int] = torch.tensor( [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]]) print('Logits:' , outputs.logits[0, 0, :3, :3]) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4) print('Looks ok!') if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) print(f'Saving processor to {pytorch_dump_folder_path}') processor.save_pretrained(lowerCamelCase_) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub') model.push_to_hub(f'openmmlab/{model_name}') processor.push_to_hub(f'openmmlab/{model_name}') if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[f'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) lowerCAmelCase__ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
707
'''simple docstring''' class __lowercase : def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False): # Mapping from the first character of the prefix of the node UpperCamelCase__ : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word UpperCamelCase__ : List[Any] = is_leaf UpperCamelCase__ : Optional[Any] = prefix def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[int] = 0 for q, w in zip(self.prefix , UpperCAmelCase_): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]): for word in words: self.insert(UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str): # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: UpperCamelCase__ : Optional[Any] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_) else: UpperCamelCase__ : int = self.nodes[word[0]] UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match( UpperCAmelCase_) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(UpperCAmelCase_) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: UpperCamelCase__ : Tuple = remaining_prefix UpperCamelCase__ : str = self.nodes[matching_string[0]] UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : str = aux_node if remaining_word == "": UpperCamelCase__ : int = True else: self.nodes[matching_string[0]].insert(UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_) if not incoming_node: return False else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match( UpperCAmelCase_) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(UpperCAmelCase_) def __UpperCamelCase ( self : str , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_) if not incoming_node: return False else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match( UpperCAmelCase_) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(UpperCAmelCase_) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes) == 1 and not self.is_leaf: UpperCamelCase__ : List[str] = list(self.nodes.values())[0] UpperCamelCase__ : Tuple = merging_node.is_leaf self.prefix += merging_node.prefix UpperCamelCase__ : Tuple = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes) > 1: UpperCamelCase__ : str = False # If there is 1 edge, we merge it with its child else: UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0] UpperCamelCase__ : Optional[Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix UpperCamelCase__ : Union[str, Any] = merging_node.nodes return True def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0): if self.prefix != "": print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '') for value in self.nodes.values(): value.print_tree(height + 1) def __UpperCAmelCase ( ) -> bool: UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split() UpperCamelCase__ : List[Any] = RadixNode() root.insert_many(lowerCamelCase_) assert all(root.find(lowerCamelCase_) for word in words) assert not root.find('bandanas') assert not root.find('apps') root.delete('all') assert not root.find('all') root.delete('banana') assert not root.find('banana') assert root.find('bananas') return True def __UpperCAmelCase ( ) -> None: assert test_trie() def __UpperCAmelCase ( ) -> None: UpperCamelCase__ : List[Any] = RadixNode() UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split() root.insert_many(lowerCamelCase_) print('Words:' , lowerCamelCase_) print('Tree:') root.print_tree() if __name__ == "__main__": main()
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ = 10 , lowerCamelCase_ = 1_000 , lowerCamelCase_ = True) -> int: assert ( isinstance(lowerCamelCase_ , lowerCamelCase_) and isinstance(lowerCamelCase_ , lowerCamelCase_) and isinstance(lowerCamelCase_ , lowerCamelCase_) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError('Invalid value for min_val or max_val (min_value < max_value)') return min_val if option else max_val def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> int: return int((number_a + number_a) / 2) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> None: assert ( isinstance(lowerCamelCase_ , lowerCamelCase_) and isinstance(lowerCamelCase_ , lowerCamelCase_) and isinstance(lowerCamelCase_ , lowerCamelCase_) ), 'argument values must be type of "int"' if lower > higher: raise ValueError('argument value for lower and higher must be(lower > higher)') if not lower < to_guess < higher: raise ValueError( 'guess value must be within the range of lower and higher value') def answer(lowerCamelCase_) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print('started...') UpperCamelCase__ : Dict = lower UpperCamelCase__ : Optional[Any] = higher UpperCamelCase__ : Optional[int] = [] while True: UpperCamelCase__ : List[Any] = get_avg(lowerCamelCase_ , lowerCamelCase_) last_numbers.append(lowerCamelCase_) if answer(lowerCamelCase_) == "low": UpperCamelCase__ : Any = number elif answer(lowerCamelCase_) == "high": UpperCamelCase__ : Optional[Any] = number else: break print(f'guess the number : {last_numbers[-1]}') print(f'details : {last_numbers!s}') def __UpperCAmelCase ( ) -> None: UpperCamelCase__ : Dict = int(input('Enter lower value : ').strip()) UpperCamelCase__ : Union[str, Any] = int(input('Enter high value : ').strip()) UpperCamelCase__ : Optional[int] = int(input('Enter value to guess : ').strip()) guess_the_number(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) if __name__ == "__main__": main()
708
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase__ = False class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : int): return 12 @property def __UpperCamelCase ( self : Tuple): return 12 @property def __UpperCamelCase ( self : Dict): return 32 @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def __UpperCamelCase ( self : List[str]): torch.manual_seed(0) UpperCamelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = 12 UpperCamelCase__ : Dict = 12 UpperCamelCase__ : Union[str, Any] = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_) return model def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = 'cpu' UpperCamelCase__ : List[str] = self.dummy_vqvae UpperCamelCase__ : List[str] = self.dummy_text_encoder UpperCamelCase__ : Optional[int] = self.dummy_tokenizer UpperCamelCase__ : List[str] = self.dummy_transformer UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_) UpperCamelCase__ : int = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Optional[int] = 'cpu' UpperCamelCase__ : str = self.dummy_vqvae UpperCamelCase__ : Any = self.dummy_text_encoder UpperCamelCase__ : List[Any] = self.dummy_tokenizer UpperCamelCase__ : Dict = self.dummy_transformer UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length) UpperCamelCase__ : str = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : str = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : int = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Any): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy') UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq') UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_) pipeline.set_progress_bar_config(disable=UpperCAmelCase_) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : int = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , ) UpperCamelCase__ : int = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 2.0
6
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCAmelCase__ = { 'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST', 'FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
709
'''simple docstring''' import numpy as np from PIL import Image def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : str = 0 # compute the shape of the output matrix UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape)) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size]) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[int] = 0 return updated_arr def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : Tuple = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : List[str] = 0 UpperCamelCase__ : List[Any] = 0 # compute the shape of the output matrix UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape)) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size])) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : Union[str, Any] = 0 UpperCamelCase__ : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='avgpooling', verbose=True) # Loading the image lowerCAmelCase__ = Image.open('path_to_image') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
6
0
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str: # Initialise PyTorch model UpperCamelCase__ : Optional[int] = BertConfig.from_json_file(lowerCamelCase_) print(f'Building PyTorch model from configuration: {config}') UpperCamelCase__ : str = BertForPreTraining(lowerCamelCase_) # Load weights from tf checkpoint load_tf_weights_in_bert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}') torch.save(model.state_dict() , lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--bert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
710
'''simple docstring''' from __future__ import annotations class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]): UpperCamelCase__ : int = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.') if len(UpperCAmelCase_) != 0: UpperCamelCase__ : str = len(rows[0]) if cols == 0: raise error for row in rows: if len(UpperCAmelCase_) != cols: raise error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise error UpperCamelCase__ : Optional[int] = rows else: UpperCamelCase__ : Optional[Any] = [] def __UpperCamelCase ( self : Union[str, Any]): return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] @property def __UpperCamelCase ( self : Dict): return len(self.rows) @property def __UpperCamelCase ( self : Tuple): return len(self.rows[0]) @property def __UpperCamelCase ( self : List[Any]): return (self.num_rows, self.num_columns) @property def __UpperCamelCase ( self : Any): return self.order[0] == self.order[1] def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[int] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] for row_num in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : Dict): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0]) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns)) def __UpperCamelCase ( self : str): return bool(self.determinant()) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int): UpperCamelCase__ : Optional[Any] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns) if other_column != column ] for other_row in range(self.num_rows) if other_row != row ] return Matrix(UpperCAmelCase_).determinant() def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int): if (row + column) % 2 == 0: return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : List[Any]): return Matrix( [ [self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)] for row in range(self.num_rows) ]) def __UpperCamelCase ( self : Optional[int]): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns) ] for row in range(self.minors().num_rows) ]) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = [ [self.cofactors().rows[column][row] for column in range(self.num_columns)] for row in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse') return self.adjugate() * (1 / determinant) def __repr__( self : Any): return str(self.rows) def __str__( self : List[Any]): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0])) + "]]" return ( "[" + "\n ".join( [ '[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]' for row in self.rows ]) + "]" ) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix') if position is None: self.rows.append(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:] def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : int = TypeError( 'Column must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in column: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix') if position is None: UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)] else: UpperCamelCase__ : str = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows) ] def __eq__( self : List[Any] , UpperCAmelCase_ : object): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): return NotImplemented return self.rows == other.rows def __ne__( self : Any , UpperCAmelCase_ : object): return not self == other def __neg__( self : Union[str, Any]): return self * -1 def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Addition requires matrices of the same order') return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __sub__( self : Tuple , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order') return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float): if isinstance(UpperCAmelCase_ , (int, float)): return Matrix( [[int(element * other) for element in row] for row in self.rows]) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second') return Matrix( [ [Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()] for row in self.rows ]) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix') def __pow__( self : Dict , UpperCAmelCase_ : int): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise TypeError('A Matrix can only be raised to the power of an int') if not self.is_square: raise ValueError('Only square matrices can be raised to a power') if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power') UpperCamelCase__ : str = self for _ in range(other - 1): result *= self return result @classmethod def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]): return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_))) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' class __lowercase : def __init__( self : str): UpperCamelCase__ : Union[str, Any] = 0 UpperCamelCase__ : Any = 0 UpperCamelCase__ : int = {} def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : int): if vertex not in self.adjacency: UpperCamelCase__ : Tuple = {} self.num_vertices += 1 def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple): self.add_vertex(UpperCAmelCase_) self.add_vertex(UpperCAmelCase_) if head == tail: return UpperCamelCase__ : Tuple = weight UpperCamelCase__ : Optional[int] = weight def __UpperCamelCase ( self : Union[str, Any]): UpperCamelCase__ : List[Any] = self.get_edges() for edge in edges: UpperCamelCase__ : str = edge edges.remove((tail, head, weight)) for i in range(len(UpperCAmelCase_)): UpperCamelCase__ : Any = list(edges[i]) edges.sort(key=lambda UpperCAmelCase_: e[2]) for i in range(len(UpperCAmelCase_) - 1): if edges[i][2] >= edges[i + 1][2]: UpperCamelCase__ : Optional[int] = edges[i][2] + 1 for edge in edges: UpperCamelCase__ : List[Any] = edge UpperCamelCase__ : Dict = weight UpperCamelCase__ : List[Any] = weight def __str__( self : Any): UpperCamelCase__ : List[Any] = '' for tail in self.adjacency: for head in self.adjacency[tail]: UpperCamelCase__ : Union[str, Any] = self.adjacency[head][tail] string += F'{head} -> {tail} == {weight}\n' return string.rstrip('\n') def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Optional[Any] = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail])) return output def __UpperCamelCase ( self : List[str]): return self.adjacency.keys() @staticmethod def __UpperCamelCase ( UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None): UpperCamelCase__ : Tuple = Graph() if vertices is None: UpperCamelCase__ : Tuple = [] if edges is None: UpperCamelCase__ : List[str] = [] for vertex in vertices: g.add_vertex(UpperCAmelCase_) for edge in edges: g.add_edge(*UpperCAmelCase_) return g class __lowercase : def __init__( self : Tuple): UpperCamelCase__ : int = {} UpperCamelCase__ : List[str] = {} def __len__( self : Any): return len(self.parent) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]): if item in self.parent: return self.find(UpperCAmelCase_) UpperCamelCase__ : List[Any] = item UpperCamelCase__ : Dict = 0 return item def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Optional[int]): if item not in self.parent: return self.make_set(UpperCAmelCase_) if item != self.parent[item]: UpperCamelCase__ : List[str] = self.find(self.parent[item]) return self.parent[item] def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int): UpperCamelCase__ : List[str] = self.find(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.find(UpperCAmelCase_) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: UpperCamelCase__ : Union[str, Any] = roota return roota if self.rank[roota] < self.rank[roota]: UpperCamelCase__ : Any = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 UpperCamelCase__ : Dict = roota return roota return None @staticmethod def __UpperCamelCase ( UpperCAmelCase_ : Optional[Any]): UpperCamelCase__ : Optional[Any] = graph.num_vertices UpperCamelCase__ : Tuple = Graph.UnionFind() UpperCamelCase__ : Optional[int] = [] while num_components > 1: UpperCamelCase__ : Tuple = {} for vertex in graph.get_vertices(): UpperCamelCase__ : Optional[Any] = -1 UpperCamelCase__ : int = graph.get_edges() for edge in edges: UpperCamelCase__ : str = edge edges.remove((tail, head, weight)) for edge in edges: UpperCamelCase__ : Optional[Any] = edge UpperCamelCase__ : Union[str, Any] = union_find.find(UpperCAmelCase_) UpperCamelCase__ : List[Any] = union_find.find(UpperCAmelCase_) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: UpperCamelCase__ : Optional[Any] = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: UpperCamelCase__ : Tuple = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: UpperCamelCase__ : Union[str, Any] = cheap_edge[vertex] if union_find.find(UpperCAmelCase_) != union_find.find(UpperCAmelCase_): union_find.union(UpperCAmelCase_ , UpperCAmelCase_) mst_edges.append(cheap_edge[vertex]) UpperCamelCase__ : int = num_components - 1 UpperCamelCase__ : Dict = Graph.build(edges=UpperCAmelCase_) return mst
711
'''simple docstring''' import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowercase : def __UpperCamelCase ( self : Union[str, Any]): torch.manual_seed(0) UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : List[str] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Dict): torch.manual_seed(0) UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : str = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[str] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Any): UpperCamelCase__ : Dict = self.get_dummy_components() UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = inputs['prompt'] UpperCamelCase__ : List[Any] = inputs['generator'] UpperCamelCase__ : Tuple = inputs['num_inference_steps'] UpperCamelCase__ : List[Any] = inputs['output_type'] if "image" in inputs: UpperCamelCase__ : Tuple = inputs['image'] else: UpperCamelCase__ : Union[str, Any] = None if "mask_image" in inputs: UpperCamelCase__ : Optional[int] = inputs['mask_image'] else: UpperCamelCase__ : int = None if "original_image" in inputs: UpperCamelCase__ : List[Any] = inputs['original_image'] else: UpperCamelCase__ : Optional[Any] = None UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_) # inputs with prompt converted to embeddings UpperCamelCase__ : List[Any] = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Dict = image if mask_image is not None: UpperCamelCase__ : Optional[int] = mask_image if original_image is not None: UpperCamelCase__ : Union[str, Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , ) UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = inputs['generator'] UpperCamelCase__ : List[Any] = inputs['num_inference_steps'] UpperCamelCase__ : Optional[int] = inputs['output_type'] # inputs with prompt converted to embeddings UpperCamelCase__ : Any = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Tuple = image if mask_image is not None: UpperCamelCase__ : Union[str, Any] = mask_image if original_image is not None: UpperCamelCase__ : str = original_image UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4) def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Any = self.get_dummy_components() UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4)
6
0
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants lowerCAmelCase__ = 300 # TEMPERATURE (unit = K) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ): if donor_conc <= 0: raise ValueError('Donor concentration should be positive') elif acceptor_conc <= 0: raise ValueError('Acceptor concentration should be positive') elif intrinsic_conc <= 0: raise ValueError('Intrinsic concentration should be positive') elif donor_conc <= intrinsic_conc: raise ValueError( 'Donor concentration should be greater than intrinsic concentration') elif acceptor_conc <= intrinsic_conc: raise ValueError( 'Acceptor concentration should be greater than intrinsic concentration') else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
712
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCAmelCase__ = 3 def __UpperCAmelCase ( lowerCamelCase_) -> int: print('Generating primitive root of p') while True: UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_) if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1: continue if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1: continue return g def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print('Generating prime p...') UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number. UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p. UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety. UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_) UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p) UpperCamelCase__ : Optional[Any] = (key_size, d) return public_key, private_key def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None: if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'): print('\nWARNING:') print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' 'Use a different name or delete these files and re-run this program.') sys.exit() UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_) print(f'\nWriting public key to file {name}_pubkey.txt...') with open(f'{name}_pubkey.txt' , 'w') as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}') print(f'Writing private key to file {name}_privkey.txt...') with open(f'{name}_privkey.txt' , 'w') as fo: fo.write(f'{private_key[0]},{private_key[1]}') def __UpperCAmelCase ( ) -> None: print('Making key files...') make_key_files('elgamal' , 2_048) print('Key files generation successful') if __name__ == "__main__": main()
6
0
'''simple docstring''' import argparse import logging import pickle from collections import Counter logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) lowerCAmelCase__ = logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser( description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)' ) parser.add_argument( '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.' ) parser.add_argument( '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.' ) parser.add_argument('--vocab_size', default=3_0522, type=int) lowerCAmelCase__ = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, 'rb') as fp: lowerCAmelCase__ = pickle.load(fp) logger.info('Counting occurrences for MLM.') lowerCAmelCase__ = Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase__ = [0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase__ = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, 'wb') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
713
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase__ = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str: for attribute in key.split('.'): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models UpperCamelCase__ : str = 'lm_head' UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_) if weight_type is not None: UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape else: UpperCamelCase__ : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": UpperCamelCase__ : Optional[Any] = value elif weight_type == "weight_g": UpperCamelCase__ : Union[str, Any] = value elif weight_type == "weight_v": UpperCamelCase__ : List[Any] = value elif weight_type == "bias": UpperCamelCase__ : Any = value else: UpperCamelCase__ : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]: UpperCamelCase__ : List[Any] = [] UpperCamelCase__ : int = fairseq_model.state_dict() UpperCamelCase__ : int = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase__ : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase__ : List[Any] = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]: UpperCamelCase__ : Any = True if "*" in mapped_key: UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2] UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_) if "weight_g" in name: UpperCamelCase__ : int = 'weight_g' elif "weight_v" in name: UpperCamelCase__ : Any = 'weight_v' elif "bias" in name: UpperCamelCase__ : Union[str, Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase__ : Any = 'weight' else: UpperCamelCase__ : Tuple = None set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) continue if not is_used: unused_weights.append(lowerCamelCase_) logger.warning(f'Unused weights: {unused_weights}') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1] UpperCamelCase__ : List[Any] = name.split('.') UpperCamelCase__ : Any = int(items[0]) UpperCamelCase__ : int = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) UpperCamelCase__ : Tuple = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) UpperCamelCase__ : int = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) UpperCamelCase__ : Optional[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) UpperCamelCase__ : List[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') else: unused_weights.append(lowerCamelCase_) @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple: if config_path is not None: UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_) else: UpperCamelCase__ : int = UniSpeechConfig() if is_finetuned: if dict_path: UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase__ : List[Any] = target_dict.pad_index UpperCamelCase__ : Dict = target_dict.bos_index UpperCamelCase__ : Union[str, Any] = target_dict.eos_index UpperCamelCase__ : Tuple = len(target_dict.symbols) UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json') if not os.path.isdir(lowerCamelCase_): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_)) return os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_) UpperCamelCase__ : Optional[int] = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase__ : Any = 42 UpperCamelCase__ : List[str] = 43 with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle: json.dump(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer( lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , ) UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_) processor.save_pretrained(lowerCamelCase_) UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_) else: UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_) if is_finetuned: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path}) else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) UpperCamelCase__ : int = model[0].eval() recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) hf_unispeech.save_pretrained(lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase__ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
6
0
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple: assert isinstance(lowerCamelCase_ , lowerCamelCase_) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int: UpperCamelCase__ : List[str] = tmp_path / 'cache' UpperCamelCase__ : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ : Union[str, Any] = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_).read() _check_json_dataset(lowerCamelCase_ , lowerCamelCase_) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int: UpperCamelCase__ : Optional[int] = tmp_path / 'cache' UpperCamelCase__ : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase__ : Optional[int] = features.copy() if features else default_expected_features UpperCamelCase__ : Tuple = ( Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None ) UpperCamelCase__ : Dict = JsonDatasetReader(lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read() _check_json_dataset(lowerCamelCase_ , lowerCamelCase_) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase__ : int = tmp_path / 'cache' UpperCamelCase__ : str = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} UpperCamelCase__ : Any = features.copy() if features else default_expected_features UpperCamelCase__ : Optional[int] = ( Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None ) UpperCamelCase__ : Dict = JsonDatasetReader(lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read() assert isinstance(lowerCamelCase_ , lowerCamelCase_) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> int: # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} UpperCamelCase__ : Optional[Any] = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} UpperCamelCase__ : Union[str, Any] = features.copy() UpperCamelCase__ : Union[str, Any] = ( Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None ) UpperCamelCase__ : str = tmp_path / 'cache' UpperCamelCase__ : Optional[int] = JsonDatasetReader(lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read() assert isinstance(lowerCamelCase_ , lowerCamelCase_) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train'), 'train', 'test']) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any: UpperCamelCase__ : int = tmp_path / 'cache' UpperCamelCase__ : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase__ : Optional[Any] = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_ , split=lowerCamelCase_).read() _check_json_dataset(lowerCamelCase_ , lowerCamelCase_) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any: if issubclass(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase__ : Optional[Any] = jsonl_path elif issubclass(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase__ : Tuple = [jsonl_path] UpperCamelCase__ : Tuple = tmp_path / 'cache' UpperCamelCase__ : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase__ : Optional[Any] = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_).read() _check_json_dataset(lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=("train",)) -> Optional[int]: assert isinstance(lowerCamelCase_ , lowerCamelCase_) for split in splits: UpperCamelCase__ : Tuple = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int: UpperCamelCase__ : List[Any] = tmp_path / 'cache' UpperCamelCase__ : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase__ : Optional[Any] = JsonDatasetReader({'train': jsonl_path} , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_).read() _check_json_datasetdict(lowerCamelCase_ , lowerCamelCase_) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]: UpperCamelCase__ : List[str] = tmp_path / 'cache' UpperCamelCase__ : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase__ : Optional[Any] = features.copy() if features else default_expected_features UpperCamelCase__ : List[str] = ( Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None ) UpperCamelCase__ : str = JsonDatasetReader({'train': jsonl_path} , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read() _check_json_datasetdict(lowerCamelCase_ , lowerCamelCase_) @pytest.mark.parametrize('split' , [None, NamedSplit('train'), 'train', 'test']) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int: if split: UpperCamelCase__ : Tuple = {split: jsonl_path} else: UpperCamelCase__ : int = 'train' UpperCamelCase__ : List[str] = {'train': jsonl_path, 'test': jsonl_path} UpperCamelCase__ : Tuple = tmp_path / 'cache' UpperCamelCase__ : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} UpperCamelCase__ : int = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_).read() _check_json_datasetdict(lowerCamelCase_ , lowerCamelCase_ , splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]: return json.load(lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]: return [json.loads(lowerCamelCase_) for line in buffer] class __lowercase : @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)]) def __UpperCamelCase ( self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_).write() buffer.seek(0) UpperCamelCase__ : Optional[Any] = load_json_function(UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) assert isinstance(exported_content[0] , UpperCAmelCase_) assert len(UpperCAmelCase_) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789'), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_ , orient=UpperCAmelCase_).write() buffer.seek(0) UpperCamelCase__ : Optional[int] = load_json(UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCAmelCase_ , 'keys') and not hasattr(exported_content[0] , 'keys') if len_at: assert len(exported_content[len_at]) == 10 else: assert len(UpperCAmelCase_) == 10 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)]) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_ , num_proc=2).write() buffer.seek(0) UpperCamelCase__ : str = load_json_function(UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) assert isinstance(exported_content[0] , UpperCAmelCase_) assert len(UpperCAmelCase_) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789'), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_ , orient=UpperCAmelCase_ , num_proc=2).write() buffer.seek(0) UpperCamelCase__ : Union[str, Any] = load_json(UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCAmelCase_ , 'keys') and not hasattr(exported_content[0] , 'keys') if len_at: assert len(exported_content[len_at]) == 10 else: assert len(UpperCAmelCase_) == 10 def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int]): with pytest.raises(UpperCAmelCase_): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=0) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')]) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any): UpperCamelCase__ : int = tmp_path_factory.mktemp('data') / F'test.json.{extension}' UpperCamelCase__ : Dict = str(shared_datadir / F'test_file.json.{extension}') JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , compression=UpperCAmelCase_).write() with fsspec.open(UpperCAmelCase_ , 'rb' , compression='infer') as f: UpperCamelCase__ : Optional[int] = f.read() with fsspec.open(UpperCAmelCase_ , 'rb' , compression='infer') as f: UpperCamelCase__ : int = f.read() assert exported_content == original_content
714
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[str]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Union[str, Any] = 1 UpperCamelCase__ : Union[str, Any] = 3 UpperCamelCase__ : Dict = (32, 32) UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_) return image @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def __UpperCamelCase ( self : str): torch.manual_seed(0) UpperCamelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[Any]): def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict): class __lowercase : def __init__( self : List[Any]): UpperCamelCase__ : Optional[Any] = torch.ones([0]) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int): self.pixel_values.to(UpperCAmelCase_) return self return Out() return extract def __UpperCamelCase ( self : str): UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : Any = self.dummy_cond_unet UpperCamelCase__ : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , ) UpperCamelCase__ : List[str] = self.dummy_vae UpperCamelCase__ : str = self.dummy_text_encoder UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Tuple = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : int = self.dummy_cond_unet UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Optional[int] = self.dummy_vae UpperCamelCase__ : Optional[int] = self.dummy_text_encoder UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Dict = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) assert isinstance(pipe.scheduler , UpperCAmelCase_) assert pipe.safety_checker is None UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU') def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Dict = self.dummy_cond_unet UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Any = self.dummy_vae UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # put models in fp16 UpperCamelCase__ : Any = unet.half() UpperCamelCase__ : Tuple = vae.half() UpperCamelCase__ : Optional[int] = bert.half() # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[int] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger' UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) UpperCamelCase__ : Any = 4_003_660_346 UpperCamelCase__ : Any = 7 # without safety guidance (sld_guidance_scale = 0) UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : str = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # without safety guidance (strong configuration) UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Dict = output.images UpperCamelCase__ : str = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity' UpperCamelCase__ : Tuple = 2_734_971_755 UpperCamelCase__ : Tuple = 7 UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : int = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : int = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5') UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : int = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) UpperCamelCase__ : Any = 1_044_355_234 UpperCamelCase__ : Optional[int] = 12 UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7 UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : List[str] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
6
0
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def __UpperCAmelCase ( lowerCamelCase_) -> float: return np.dot(lowerCamelCase_ , lowerCamelCase_) class __lowercase : def __init__( self : Tuple , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ): UpperCamelCase__ : Union[str, Any] = regularization UpperCamelCase__ : Optional[int] = gamma if kernel == "linear": UpperCamelCase__ : List[str] = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma') if not isinstance(self.gamma , (float, int)): raise ValueError('gamma must be float or int') if not self.gamma > 0: raise ValueError('gamma must be > 0') UpperCamelCase__ : Union[str, Any] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}' raise ValueError(UpperCAmelCase_) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Any = observations UpperCamelCase__ : Tuple = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations (UpperCamelCase__ ) : Optional[Any] = np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: UpperCamelCase__ : Union[str, Any] = 0 (UpperCamelCase__ ) : int = np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0) UpperCamelCase__ : Dict = Bounds(0 , self.regularization) UpperCamelCase__ : Any = minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x UpperCamelCase__ : str = l_star # calculating mean offset of separation plane to points UpperCamelCase__ : Any = 0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) UpperCamelCase__ : List[str] = s / n def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Optional[int] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
715
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowerCAmelCase__ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowerCAmelCase__ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __UpperCAmelCase ( ) -> Union[str, Any]: UpperCamelCase__ : Optional[Any] = ( list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1)) ) UpperCamelCase__ : List[Any] = bs[:] UpperCamelCase__ : Optional[int] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase_) cs.append(2**8 + n) n += 1 UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs] return dict(zip(lowerCamelCase_ , lowerCamelCase_)) def __UpperCAmelCase ( lowerCamelCase_) -> Tuple: UpperCamelCase__ : Any = set() UpperCamelCase__ : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char)) UpperCamelCase__ : str = char return pairs class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ): UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token super().__init__( errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , ) with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle: UpperCamelCase__ : Any = json.load(UpperCAmelCase_) UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()} UpperCamelCase__ : Any = errors # how to handle errors in decoding UpperCamelCase__ : Tuple = bytes_to_unicode() UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle: UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1] UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges] UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Dict = {} UpperCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def __UpperCamelCase ( self : Tuple): return len(self.encoder) def __UpperCamelCase ( self : Tuple): return dict(self.encoder , **self.added_tokens_encoder) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]): if token in self.cache: return self.cache[token] UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_) UpperCamelCase__ : int = get_pairs(UpperCAmelCase_) if not pairs: return token while True: UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf'))) if bigram not in self.bpe_ranks: break UpperCamelCase__, UpperCamelCase__ : Tuple = bigram UpperCamelCase__ : Dict = [] UpperCamelCase__ : Optional[int] = 0 while i < len(UpperCAmelCase_): try: UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) UpperCamelCase__ : Any = j if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_) UpperCamelCase__ : Dict = new_word if len(UpperCAmelCase_) == 1: break else: UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : List[Any] = word return word def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any): UpperCamelCase__ : Optional[Any] = [] for token in re.findall(self.pat , UpperCAmelCase_): UpperCamelCase__ : Optional[int] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' ')) return bpe_tokens def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token)) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]): return self.decoder.get(UpperCAmelCase_) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int): UpperCamelCase__ : int = ''.join(UpperCAmelCase_) UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): if not os.path.isdir(UpperCAmelCase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return UpperCamelCase__ : str = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n') UpperCamelCase__ : str = 0 with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!') UpperCamelCase__ : List[Any] = token_index writer.write(' '.join(UpperCAmelCase_) + '\n') index += 1 return vocab_file, merge_file def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_)) + [1] return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1] def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : Any = [self.sep_token_id] UpperCamelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]): UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()): UpperCamelCase__ : str = ' ' + text return (text, kwargs) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): return token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"): UpperCamelCase__ : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text) else: # Generated responses should contain them already. inputs.append(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : int = self.encode(UpperCAmelCase_) if len(UpperCAmelCase_) > self.model_max_length: UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :] logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.') return input_ids
6
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''open-llama''' def __init__( self : Optional[int] , UpperCAmelCase_ : Any=100_000 , UpperCAmelCase_ : Union[str, Any]=4_096 , UpperCAmelCase_ : int=11_008 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : List[Any]=32 , UpperCAmelCase_ : Optional[int]="silu" , UpperCAmelCase_ : Optional[int]=2_048 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-6 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Dict , ): UpperCamelCase__ : int = vocab_size UpperCamelCase__ : Optional[int] = max_position_embeddings UpperCamelCase__ : Dict = hidden_size UpperCamelCase__ : Dict = intermediate_size UpperCamelCase__ : List[Any] = num_hidden_layers UpperCamelCase__ : Union[str, Any] = num_attention_heads UpperCamelCase__ : Optional[int] = hidden_act UpperCamelCase__ : List[Any] = initializer_range UpperCamelCase__ : List[Any] = rms_norm_eps UpperCamelCase__ : Optional[int] = use_cache UpperCamelCase__ : Any = kwargs.pop( 'use_memorry_efficient_attention' , UpperCAmelCase_) UpperCamelCase__ : int = hidden_dropout_prob UpperCamelCase__ : Tuple = attention_dropout_prob UpperCamelCase__ : Optional[Any] = use_stable_embedding UpperCamelCase__ : Optional[Any] = shared_input_output_embedding UpperCamelCase__ : Dict = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __UpperCamelCase ( self : List[str]): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , UpperCAmelCase_) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' F'got {self.rope_scaling}') UpperCamelCase__ : Optional[Any] = self.rope_scaling.get('type' , UpperCAmelCase_) UpperCamelCase__ : Any = self.rope_scaling.get('factor' , UpperCAmelCase_) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}') if rope_scaling_factor is None or not isinstance(UpperCAmelCase_ , UpperCAmelCase_) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
716
'''simple docstring''' import requests from bsa import BeautifulSoup def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str: UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser') UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_).find('span').text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
6
0
'''simple docstring''' import operator def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None) -> list: UpperCamelCase__ : Union[str, Any] = operator.lt if reverse else operator.gt UpperCamelCase__ : List[str] = solution or [] if not arr: return solution UpperCamelCase__ : Tuple = [arr.pop(0)] for i, item in enumerate(lowerCamelCase_): if _operator(lowerCamelCase_ , sublist[-1]): sublist.append(lowerCamelCase_) arr.pop(lowerCamelCase_) # merging sublist into solution list if not solution: solution.extend(lowerCamelCase_) else: while sublist: UpperCamelCase__ : List[Any] = sublist.pop(0) for i, xx in enumerate(lowerCamelCase_): if not _operator(lowerCamelCase_ , lowerCamelCase_): solution.insert(lowerCamelCase_ , lowerCamelCase_) break else: solution.append(lowerCamelCase_) strand_sort(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
717
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __lowercase (unittest.TestCase ): @slow def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') model.to(UpperCAmelCase_) from datasets import load_dataset UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo') UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB') UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_) UpperCamelCase__ : Tuple = outputs.logits UpperCamelCase__ : str = torch.Size((1, 16)) self.assertEqual(logits.shape , UpperCAmelCase_) UpperCamelCase__ : Tuple = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
6
0
'''simple docstring''' import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input lowerCAmelCase__ = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine' def __UpperCAmelCase ( ) -> List[str]: UpperCamelCase__ : int = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: UpperCamelCase__ : Any = get_sagemaker_input() else: UpperCamelCase__ : Optional[Any] = get_cluster_input() return config def __UpperCAmelCase ( lowerCamelCase_=None) -> int: if subparsers is not None: UpperCamelCase__ : Tuple = subparsers.add_parser('config' , description=lowerCamelCase_) else: UpperCamelCase__ : Tuple = argparse.ArgumentParser('Accelerate config command' , description=lowerCamelCase_) parser.add_argument( '--config_file' , default=lowerCamelCase_ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=lowerCamelCase_) return parser def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]: UpperCamelCase__ : Optional[Any] = get_user_input() if args.config_file is not None: UpperCamelCase__ : int = args.config_file else: if not os.path.isdir(lowerCamelCase_): os.makedirs(lowerCamelCase_) UpperCamelCase__ : Tuple = default_yaml_config_file if config_file.endswith('.json'): config.to_json_file(lowerCamelCase_) else: config.to_yaml_file(lowerCamelCase_) print(f'accelerate configuration saved at {config_file}') def __UpperCAmelCase ( ) -> Dict: UpperCamelCase__ : Dict = config_command_parser() UpperCamelCase__ : Optional[Any] = parser.parse_args() config_command(lowerCamelCase_) if __name__ == "__main__": main()
718
'''simple docstring''' import argparse import struct import unittest class __lowercase : def __init__( self : Tuple , UpperCAmelCase_ : bytes): UpperCamelCase__ : Dict = data # Initialize hash values UpperCamelCase__ : Any = [ 0X6A_09E_667, 0XBB_67A_E85, 0X3C_6EF_372, 0XA5_4FF_53A, 0X51_0E5_27F, 0X9B_056_88C, 0X1F_83D_9AB, 0X5B_E0C_D19, ] # Initialize round constants UpperCamelCase__ : List[Any] = [ 0X42_8A2_F98, 0X71_374_491, 0XB5_C0F_BCF, 0XE9_B5D_BA5, 0X39_56C_25B, 0X59_F11_1F1, 0X92_3F8_2A4, 0XAB_1C5_ED5, 0XD8_07A_A98, 0X12_835_B01, 0X24_318_5BE, 0X55_0C7_DC3, 0X72_BE5_D74, 0X80_DEB_1FE, 0X9B_DC0_6A7, 0XC1_9BF_174, 0XE4_9B6_9C1, 0XEF_BE4_786, 0X0F_C19_DC6, 0X24_0CA_1CC, 0X2D_E92_C6F, 0X4A_748_4AA, 0X5C_B0A_9DC, 0X76_F98_8DA, 0X98_3E5_152, 0XA8_31C_66D, 0XB0_032_7C8, 0XBF_597_FC7, 0XC6_E00_BF3, 0XD5_A79_147, 0X06_CA6_351, 0X14_292_967, 0X27_B70_A85, 0X2E_1B2_138, 0X4D_2C6_DFC, 0X53_380_D13, 0X65_0A7_354, 0X76_6A0_ABB, 0X81_C2C_92E, 0X92_722_C85, 0XA2_BFE_8A1, 0XA8_1A6_64B, 0XC2_4B8_B70, 0XC7_6C5_1A3, 0XD1_92E_819, 0XD6_990_624, 0XF4_0E3_585, 0X10_6AA_070, 0X19_A4C_116, 0X1E_376_C08, 0X27_487_74C, 0X34_B0B_CB5, 0X39_1C0_CB3, 0X4E_D8A_A4A, 0X5B_9CC_A4F, 0X68_2E6_FF3, 0X74_8F8_2EE, 0X78_A56_36F, 0X84_C87_814, 0X8C_C70_208, 0X90_BEF_FFA, 0XA4_506_CEB, 0XBE_F9A_3F7, 0XC6_717_8F2, ] UpperCamelCase__ : Tuple = self.preprocessing(self.data) self.final_hash() @staticmethod def __UpperCamelCase ( UpperCAmelCase_ : bytes): UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64)) UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8)) return data + padding + big_endian_integer def __UpperCamelCase ( self : Union[str, Any]): # Convert into blocks of 64 bytes UpperCamelCase__ : int = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data) , 64) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_)) # add 48 0-ed integers words += [0] * 48 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes for index in range(0 , 64): if index > 15: # modify the zero-ed indexes at the end of the array UpperCamelCase__ : Dict = ( self.ror(words[index - 15] , 7) ^ self.ror(words[index - 15] , 18) ^ (words[index - 15] >> 3) ) UpperCamelCase__ : Tuple = ( self.ror(words[index - 2] , 17) ^ self.ror(words[index - 2] , 19) ^ (words[index - 2] >> 10) ) UpperCamelCase__ : int = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25) UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g) UpperCamelCase__ : List[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22) UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c) UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h] # Modify final values UpperCamelCase__ : Optional[Any] = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes) ] UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes]) def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int): return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations) class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : int): import hashlib UpperCamelCase__ : str = bytes('Test String' , 'utf-8') self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest()) def __UpperCAmelCase ( ) -> None: import doctest doctest.testmod() UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file') UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : str = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb') as f: UpperCamelCase__ : Any = f.read() else: UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8') print(SHAaaa(lowerCamelCase_).hash) if __name__ == "__main__": main()
6
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class __lowercase (__lowerCamelCase ): def __init__( self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str): warnings.warn( 'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DonutImageProcessor instead.' , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
719
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants lowerCAmelCase__ = 300 # TEMPERATURE (unit = K) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float: if donor_conc <= 0: raise ValueError('Donor concentration should be positive') elif acceptor_conc <= 0: raise ValueError('Acceptor concentration should be positive') elif intrinsic_conc <= 0: raise ValueError('Intrinsic concentration should be positive') elif donor_conc <= intrinsic_conc: raise ValueError( 'Donor concentration should be greater than intrinsic concentration') elif acceptor_conc <= intrinsic_conc: raise ValueError( 'Acceptor concentration should be greater than intrinsic concentration') else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_) -> Any: UpperCamelCase__ : Dict = DPTConfig() if "large" in checkpoint_url: UpperCamelCase__ : List[str] = 1_024 UpperCamelCase__ : List[str] = 4_096 UpperCamelCase__ : Optional[int] = 24 UpperCamelCase__ : List[str] = 16 UpperCamelCase__ : List[str] = [5, 11, 17, 23] UpperCamelCase__ : str = [256, 512, 1_024, 1_024] UpperCamelCase__ : Union[str, Any] = (1, 384, 384) if "ade" in checkpoint_url: UpperCamelCase__ : int = True UpperCamelCase__ : Optional[Any] = 150 UpperCamelCase__ : int = 'huggingface/label-files' UpperCamelCase__ : List[Any] = 'ade20k-id2label.json' UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r')) UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()} UpperCamelCase__ : Union[str, Any] = idalabel UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()} UpperCamelCase__ : Any = [1, 150, 480, 480] return config, expected_shape def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder') if "pretrained.model" in name: UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings') if "patch_embed" in name: UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings') if "pos_embed" in name: UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings') if "attn.proj" in name: UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense') if "proj" in name and "project" not in name: UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection') if "blocks" in name: UpperCamelCase__ : int = name.replace('blocks' , 'layer') if "mlp.fc1" in name: UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense') if "mlp.fc2" in name: UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense') if "norm1" in name: UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before') if "norm2" in name: UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after') if "scratch.output_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head') if "scratch" in name: UpperCamelCase__ : int = name.replace('scratch' , 'neck') if "layer1_rn" in name: UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0') if "layer2_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1') if "layer3_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2') if "layer4_rn" in name: UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3') if "refinenet" in name: UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}') if "out_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection') if "resConfUnit1" in name: UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1') if "resConfUnit2" in name: UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2') if "conv1" in name: UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1') if "conv2" in name: UpperCamelCase__ : int = name.replace('conv2' , 'convolution2') # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0') if "pretrained.act_postprocess2.0.project.0" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0') if "pretrained.act_postprocess3.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0') if "pretrained.act_postprocess4.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0') # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection') if "pretrained.act_postprocess1.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize') if "pretrained.act_postprocess2.3" in name: UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection') if "pretrained.act_postprocess2.4" in name: UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize') if "pretrained.act_postprocess3.3" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection') if "pretrained.act_postprocess4.3" in name: UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection') if "pretrained.act_postprocess4.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize') if "pretrained" in name: UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt') if "bn" in name: UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm') if "head" in name: UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head') if "encoder.norm" in name: UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm') if "auxlayer" in name: UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head') return name def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any: for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight') UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias') # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :] UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size] UpperCamelCase__ : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw) return im @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_) # load original state_dict from URL UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu') # remove certain keys remove_ignore_keys_(lowerCamelCase_) # rename keys for key in state_dict.copy().keys(): UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_) UpperCamelCase__ : List[str] = val # read in qkv matrices read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_) # load HuggingFace model UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_) model.load_state_dict(lowerCamelCase_) model.eval() # Check outputs on an image UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384 UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_) UpperCamelCase__ : int = prepare_img() UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt') # forward pass UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth # Assert logits UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]]) if "ade" in checkpoint_url: UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]]) assert outputs.shape == torch.Size(lowerCamelCase_) assert ( torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_) ) Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase_) if push_to_hub: print('Pushing model to hub...') model.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) lowerCAmelCase__ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
720
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]: UpperCamelCase__ : int = [] if isinstance(lowerCamelCase_ , lowerCamelCase_): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , (list, tuple)): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , torch.Tensor): shapes.append(tree.shape) else: raise ValueError('Not supported') return shapes @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]: UpperCamelCase__ : int = [] for d in reversed(lowerCamelCase_): idx.append(flat_idx % d) UpperCamelCase__ : Any = flat_idx // d return tuple(reversed(lowerCamelCase_)) @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]: # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(lowerCamelCase_) -> None: UpperCamelCase__ : Tuple = True for i in range(len(lowerCamelCase_)): UpperCamelCase__ : List[Any] = -1 * (i + 1) l[reversed_idx] &= tally UpperCamelCase__ : Optional[Any] = l[reversed_idx] if start_edges is None: UpperCamelCase__ : int = [s == 0 for s in start] reduce_edge_list(lowerCamelCase_) if end_edges is None: UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)] reduce_edge_list(lowerCamelCase_) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase_) == 0: return [()] elif len(lowerCamelCase_) == 1: return [(slice(start[0] , end[0] + 1),)] UpperCamelCase__ : List[Tuple[slice, ...]] = [] UpperCamelCase__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase_ , lowerCamelCase_): if s == e: path_list.append(slice(lowerCamelCase_ , s + 1)) else: break UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_) UpperCamelCase__ : Dict = len(lowerCamelCase_) # start == end, and we're done if divergence_idx == len(lowerCamelCase_): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : str = start[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , sdi + 1),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , )) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : Optional[int] = end[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , edi + 1),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , )) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),)) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),)) slices.extend(lower()) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper()) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),)) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper()) UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),)) slices.extend(lower()) return slices @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor: UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims] UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_)) # _get_minimal_slice_set is inclusive UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_)) # Get an ordered list of slices to perform UpperCamelCase__ : int = _get_minimal_slice_set( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) UpperCamelCase__ : List[Any] = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any: if not (len(lowerCamelCase_) > 0): raise ValueError('Must provide at least one input') UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)] UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)]) def _prep_inputs(lowerCamelCase_) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims]) == no_batch_dims: UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:]) else: UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) return t UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_) UpperCamelCase__ : int = None if _out is not None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out) UpperCamelCase__ : Dict = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase_) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[Any] = prepped_outputs for _ in range(lowerCamelCase_): # Chunk the input if not low_mem: UpperCamelCase__ : str = _select_chunk else: UpperCamelCase__ : List[Any] = partial( _chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , ) UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_) # Run the layer on the chunk UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_) # Allocate space for the output if out is None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_) # Put the chunk in its pre-allocated space if isinstance(lowerCamelCase_ , lowerCamelCase_): def assign(lowerCamelCase_ , lowerCamelCase_) -> None: for k, v in da.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_): assign(lowerCamelCase_ , da[k]) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCamelCase__ : List[str] = da[k] assign(lowerCamelCase_ , lowerCamelCase_) elif isinstance(lowerCamelCase_ , lowerCamelCase_): for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCamelCase__ : int = xa elif isinstance(lowerCamelCase_ , torch.Tensor): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCamelCase__ : Dict = output_chunk else: raise ValueError('Not supported') i += chunk_size UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_) return out class __lowercase : def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ): UpperCamelCase__ : str = max_chunk_size UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[tuple] = None def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int): logging.info('Tuning chunk size...') if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)] UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size] UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(UpperCAmelCase_ : int) -> bool: try: with torch.no_grad(): fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_) return True except RuntimeError: return False UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1 while i > min_viable_chunk_size_index: UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i]) if not viable: UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2 else: UpperCamelCase__ : Optional[int] = i UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2 return candidates[min_viable_chunk_size_index] def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable): UpperCamelCase__ : List[str] = True for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_): assert type(UpperCAmelCase_) == type(UpperCAmelCase_) if isinstance(UpperCAmelCase_ , (list, tuple)): consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) else: consistent &= aa == aa return consistent def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ): UpperCamelCase__ : List[Any] = True UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data) == len(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_) else: # Otherwise, we can reuse the precomputed value UpperCamelCase__ : Optional[int] = False if not consistent: UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
6
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
721
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : int = tempfile.mkdtemp() # fmt: off UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'} UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(UpperCAmelCase_) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(UpperCAmelCase_)) UpperCamelCase__ : Dict = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]): return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : str): shutil.rmtree(self.tmpdirname) def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Union[str, Any] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer() UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_slow.save_pretrained(self.tmpdirname) UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_) UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_fast.save_pretrained(self.tmpdirname) UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : List[str]): UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0) UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , UpperCAmelCase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : int = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : int = self.prepare_image_inputs() UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np') UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Any = 'lower newer' UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def __UpperCamelCase ( self : int): UpperCamelCase__ : Optional[int] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'lower newer' UpperCamelCase__ : List[Any] = self.prepare_image_inputs() UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_): processor() def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : str): UpperCamelCase__ : Union[str, Any] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'lower newer' UpperCamelCase__ : Optional[int] = self.prepare_image_inputs() UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_) -> bool: if not isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase__ : Optional[int] = f'Input value of [number={number}] must be an integer' raise TypeError(lowerCamelCase_) if number < 0: return False UpperCamelCase__ : List[Any] = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
700
'''simple docstring''' from typing import Union import fire import torch from tqdm import tqdm def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None: UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_) for k, v in tqdm(state_dict.items()): if not isinstance(lowerCamelCase_ , torch.Tensor): raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin') UpperCamelCase__ : int = v.half() if save_path is None: # overwrite src_path UpperCamelCase__ : List[Any] = src_path torch.save(lowerCamelCase_ , lowerCamelCase_) if __name__ == "__main__": fire.Fire(convert)
6
0
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''''' _lowerCamelCase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowerCamelCase = None # compression type in fsspec. ex: "gzip" _lowerCamelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : Optional[Any] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , **UpperCAmelCase_ : Dict): super().__init__(self , **UpperCAmelCase_) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode UpperCamelCase__ : int = fsspec.open( UpperCAmelCase_ , mode='rb' , protocol=UpperCAmelCase_ , compression=self.compression , client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs' , {}), # To avoid issues if it was already passed. } , **(target_options or {}) , ) UpperCamelCase__ : Tuple = os.path.basename(self.file.path.split('::')[0]) UpperCamelCase__ : Union[str, Any] = ( self.compressed_name[: self.compressed_name.rindex('.')] if '.' in self.compressed_name else self.compressed_name ) UpperCamelCase__ : str = None @classmethod def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : Dict): # compressed file paths are always relative to the archive root return super()._strip_protocol(UpperCAmelCase_).lstrip('/') def __UpperCamelCase ( self : Any): if self.dir_cache is None: UpperCamelCase__ : Tuple = {**self.file.fs.info(self.file.path), 'name': self.uncompressed_name} UpperCamelCase__ : Dict = {f['name']: f} def __UpperCamelCase ( self : str , UpperCAmelCase_ : str): return self.file.open().read() def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : str , ): UpperCamelCase__ : str = self._strip_protocol(UpperCAmelCase_) if mode != "rb": raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'') return self.file.open() class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''bz2''' _lowerCamelCase = '''bz2''' _lowerCamelCase = '''.bz2''' class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''gzip''' _lowerCamelCase = '''gzip''' _lowerCamelCase = '''.gz''' class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''lz4''' _lowerCamelCase = '''lz4''' _lowerCamelCase = '''.lz4''' class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''xz''' _lowerCamelCase = '''xz''' _lowerCamelCase = '''.xz''' class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''zstd''' _lowerCamelCase = '''zstd''' _lowerCamelCase = '''.zst''' def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : str = "rb" , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[dict] = None , UpperCAmelCase_ : int = DEFAULT_BLOCK_SIZE , **UpperCAmelCase_ : Dict , ): super().__init__( fo=UpperCAmelCase_ , mode=UpperCAmelCase_ , target_protocol=UpperCAmelCase_ , target_options=UpperCAmelCase_ , block_size=UpperCAmelCase_ , **UpperCAmelCase_ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 UpperCamelCase__ : str = self.file.__enter__ class __lowercase : def __init__( self : Optional[int] , UpperCAmelCase_ : Dict): UpperCamelCase__ : str = file_ def __enter__( self : str): self._file.__enter__() return self def __exit__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any): self._file.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_) def __iter__( self : Tuple): return iter(self._file) def __UpperCamelCase ( self : Any): return next(self._file) def __getattr__( self : str , UpperCAmelCase_ : Tuple): return getattr(self._file , UpperCAmelCase_) def fixed_enter(*UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict): return WrappedFile(_enter(*UpperCAmelCase_ , **UpperCAmelCase_)) UpperCamelCase__ : str = fixed_enter
701
'''simple docstring''' import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'nvidia/segformer-b0-finetuned-ade-512-512': ( 'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''segformer''' def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ): super().__init__(**UpperCAmelCase_) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , ) UpperCamelCase__ : List[Any] = num_channels UpperCamelCase__ : Any = num_encoder_blocks UpperCamelCase__ : Dict = depths UpperCamelCase__ : int = sr_ratios UpperCamelCase__ : str = hidden_sizes UpperCamelCase__ : List[str] = patch_sizes UpperCamelCase__ : Optional[int] = strides UpperCamelCase__ : Dict = mlp_ratios UpperCamelCase__ : List[str] = num_attention_heads UpperCamelCase__ : int = hidden_act UpperCamelCase__ : Any = hidden_dropout_prob UpperCamelCase__ : str = attention_probs_dropout_prob UpperCamelCase__ : List[str] = classifier_dropout_prob UpperCamelCase__ : List[Any] = initializer_range UpperCamelCase__ : Union[str, Any] = drop_path_rate UpperCamelCase__ : int = layer_norm_eps UpperCamelCase__ : Dict = decoder_hidden_size UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_) UpperCamelCase__ : List[str] = semantic_loss_ignore_index class __lowercase (__lowerCamelCase ): _lowerCamelCase = version.parse('''1.11''' ) @property def __UpperCamelCase ( self : Optional[Any]): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def __UpperCamelCase ( self : Optional[Any]): return 1e-4 @property def __UpperCamelCase ( self : Any): return 12
6
0
'''simple docstring''' from __future__ import annotations from random import random from typing import Generic, TypeVar lowerCAmelCase__ = TypeVar('KT') lowerCAmelCase__ = TypeVar('VT') class __lowercase (Generic[KT, VT] ): def __init__( self : List[str] , UpperCAmelCase_ : KT | str = "root" , UpperCAmelCase_ : VT | None = None): UpperCamelCase__ : Any = key UpperCamelCase__ : Optional[int] = value UpperCamelCase__ : list[Node[KT, VT]] = [] def __repr__( self : str): return F'Node({self.key}: {self.value})' @property def __UpperCamelCase ( self : Dict): return len(self.forward) class __lowercase (Generic[KT, VT] ): def __init__( self : int , UpperCAmelCase_ : float = 0.5 , UpperCAmelCase_ : int = 16): UpperCamelCase__ : Node[KT, VT] = Node[KT, VT]() UpperCamelCase__ : Any = 0 UpperCamelCase__ : List[str] = p UpperCamelCase__ : List[str] = max_level def __str__( self : List[str]): UpperCamelCase__ : int = list(self) if len(UpperCAmelCase_) == 0: return F'SkipList(level={self.level})' UpperCamelCase__ : Optional[Any] = max((len(str(UpperCAmelCase_)) for item in items) , default=4) UpperCamelCase__ : int = max(UpperCAmelCase_ , 4) + 4 UpperCamelCase__ : Any = self.head UpperCamelCase__ : Union[str, Any] = [] UpperCamelCase__ : Dict = node.forward.copy() lines.append(F'[{node.key}]'.ljust(UpperCAmelCase_ , '-') + '* ' * len(UpperCAmelCase_)) lines.append(' ' * label_size + '| ' * len(UpperCAmelCase_)) while len(node.forward) != 0: UpperCamelCase__ : str = node.forward[0] lines.append( F'[{node.key}]'.ljust(UpperCAmelCase_ , '-') + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards)) lines.append(' ' * label_size + '| ' * len(UpperCAmelCase_)) UpperCamelCase__ : Tuple = node.forward lines.append('None'.ljust(UpperCAmelCase_) + '* ' * len(UpperCAmelCase_)) return F'SkipList(level={self.level})\n' + "\n".join(UpperCAmelCase_) def __iter__( self : Dict): UpperCamelCase__ : Optional[Any] = self.head while len(node.forward) != 0: yield node.forward[0].key UpperCamelCase__ : str = node.forward[0] def __UpperCamelCase ( self : Any): UpperCamelCase__ : List[str] = 1 while random() < self.p and level < self.max_level: level += 1 return level def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : str): UpperCamelCase__ : Dict = [] UpperCamelCase__ : List[str] = self.head for i in reversed(range(self.level)): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: UpperCamelCase__ : Dict = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(UpperCAmelCase_) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def __UpperCamelCase ( self : str , UpperCAmelCase_ : KT): UpperCamelCase__ : Optional[Any] = self._locate_node(UpperCAmelCase_) if node is not None: for i, update_node in enumerate(UpperCAmelCase_): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: UpperCamelCase__ : int = node.forward[i] else: UpperCamelCase__ : Any = update_node.forward[:i] def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : KT , UpperCAmelCase_ : VT): UpperCamelCase__ : Optional[Any] = self._locate_node(UpperCAmelCase_) if node is not None: UpperCamelCase__ : Optional[int] = value else: UpperCamelCase__ : Tuple = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , UpperCAmelCase_): update_vector.append(self.head) UpperCamelCase__ : Union[str, Any] = level UpperCamelCase__ : List[str] = Node(UpperCAmelCase_ , UpperCAmelCase_) for i, update_node in enumerate(update_vector[:level]): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i]) if update_node.level < i + 1: update_node.forward.append(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = new_node def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : VT): UpperCamelCase__ : Optional[Any] = self._locate_node(UpperCAmelCase_) if node is not None: return node.value return None def __UpperCAmelCase ( ) -> int: UpperCamelCase__ : Optional[Any] = SkipList() skip_list.insert('Key1' , 3) skip_list.insert('Key2' , 12) skip_list.insert('Key3' , 41) skip_list.insert('Key4' , -19) UpperCamelCase__ : str = skip_list.head UpperCamelCase__ : Dict = {} while node.level != 0: UpperCamelCase__ : Union[str, Any] = node.forward[0] UpperCamelCase__ : List[str] = node.value assert len(lowerCamelCase_) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def __UpperCAmelCase ( ) -> List[Any]: UpperCamelCase__ : Optional[Any] = SkipList() skip_list.insert('Key1' , 10) skip_list.insert('Key1' , 12) skip_list.insert('Key5' , 7) skip_list.insert('Key7' , 10) skip_list.insert('Key10' , 5) skip_list.insert('Key7' , 7) skip_list.insert('Key5' , 5) skip_list.insert('Key10' , 10) UpperCamelCase__ : Union[str, Any] = skip_list.head UpperCamelCase__ : Dict = {} while node.level != 0: UpperCamelCase__ : List[str] = node.forward[0] UpperCamelCase__ : List[str] = node.value if len(lowerCamelCase_) != 4: print() assert len(lowerCamelCase_) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def __UpperCAmelCase ( ) -> Optional[int]: UpperCamelCase__ : Optional[Any] = SkipList() assert skip_list.find('Some key') is None def __UpperCAmelCase ( ) -> List[str]: UpperCamelCase__ : str = SkipList() skip_list.insert('Key2' , 20) assert skip_list.find('Key2') == 20 skip_list.insert('Some Key' , 10) skip_list.insert('Key2' , 8) skip_list.insert('V' , 13) assert skip_list.find('Y') is None assert skip_list.find('Key2') == 8 assert skip_list.find('Some Key') == 10 assert skip_list.find('V') == 13 def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : List[str] = SkipList() skip_list.delete('Some key') assert len(skip_list.head.forward) == 0 def __UpperCAmelCase ( ) -> Optional[int]: UpperCamelCase__ : Tuple = SkipList() skip_list.insert('Key1' , 12) skip_list.insert('V' , 13) skip_list.insert('X' , 14) skip_list.insert('Key2' , 15) skip_list.delete('V') skip_list.delete('Key2') assert skip_list.find('V') is None assert skip_list.find('Key2') is None def __UpperCAmelCase ( ) -> str: UpperCamelCase__ : Optional[int] = SkipList() skip_list.insert('Key1' , 12) skip_list.insert('V' , 13) skip_list.insert('X' , 14) skip_list.insert('Key2' , 15) skip_list.delete('V') assert skip_list.find('V') is None assert skip_list.find('X') == 14 assert skip_list.find('Key1') == 12 assert skip_list.find('Key2') == 15 skip_list.delete('X') assert skip_list.find('V') is None assert skip_list.find('X') is None assert skip_list.find('Key1') == 12 assert skip_list.find('Key2') == 15 skip_list.delete('Key1') assert skip_list.find('V') is None assert skip_list.find('X') is None assert skip_list.find('Key1') is None assert skip_list.find('Key2') == 15 skip_list.delete('Key2') assert skip_list.find('V') is None assert skip_list.find('X') is None assert skip_list.find('Key1') is None assert skip_list.find('Key2') is None def __UpperCAmelCase ( ) -> Any: UpperCamelCase__ : Optional[Any] = SkipList() skip_list.insert('Key1' , 12) skip_list.insert('V' , 13) skip_list.insert('X' , 142) skip_list.insert('Key2' , 15) skip_list.delete('X') def traverse_keys(lowerCamelCase_): yield node.key for forward_node in node.forward: yield from traverse_keys(lowerCamelCase_) assert len(set(traverse_keys(skip_list.head))) == 4 def __UpperCAmelCase ( ) -> List[str]: def is_sorted(lowerCamelCase_): return all(next_item >= item for item, next_item in zip(lowerCamelCase_ , lst[1:])) UpperCamelCase__ : Any = SkipList() for i in range(10): skip_list.insert(lowerCamelCase_ , lowerCamelCase_) assert is_sorted(list(lowerCamelCase_)) skip_list.delete(5) skip_list.delete(8) skip_list.delete(2) assert is_sorted(list(lowerCamelCase_)) skip_list.insert(-12 , -12) skip_list.insert(77 , 77) assert is_sorted(list(lowerCamelCase_)) def __UpperCAmelCase ( ) -> Optional[int]: for _ in range(100): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def __UpperCAmelCase ( ) -> Any: UpperCamelCase__ : List[Any] = SkipList() skip_list.insert(2 , '2') skip_list.insert(4 , '4') skip_list.insert(6 , '4') skip_list.insert(4 , '5') skip_list.insert(8 , '4') skip_list.insert(9 , '4') skip_list.delete(4) print(lowerCamelCase_) if __name__ == "__main__": import doctest doctest.testmod() main()
702
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]: return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)] if __name__ == "__main__": from doctest import testmod testmod()
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_ = 600_851_475_143) -> int: try: UpperCamelCase__ : Dict = int(lowerCamelCase_) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.') if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.') UpperCamelCase__ : Optional[int] = 1 UpperCamelCase__ : Union[str, Any] = 2 while i * i <= n: while n % i == 0: UpperCamelCase__ : List[str] = i n //= i i += 1 if n > 1: UpperCamelCase__ : str = n return int(lowerCamelCase_) if __name__ == "__main__": print(f'''{solution() = }''')
703
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def __UpperCAmelCase ( lowerCamelCase_) -> float: return np.dot(lowerCamelCase_ , lowerCamelCase_) class __lowercase : def __init__( self : Tuple , *, UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ): UpperCamelCase__ : Union[str, Any] = regularization UpperCamelCase__ : Optional[int] = gamma if kernel == "linear": UpperCamelCase__ : List[str] = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma') if not isinstance(self.gamma , (float, int)): raise ValueError('gamma must be float or int') if not self.gamma > 0: raise ValueError('gamma must be > 0') UpperCamelCase__ : Union[str, Any] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}' raise ValueError(UpperCAmelCase_) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.dot(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray): return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Any = observations UpperCamelCase__ : Tuple = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((UpperCamelCase__), ) : Optional[Any] = np.shape(UpperCAmelCase_) def to_minimize(UpperCAmelCase_ : ndarray) -> float: UpperCamelCase__ : Union[str, Any] = 0 ((UpperCamelCase__), ) : int = np.shape(UpperCAmelCase_) for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(UpperCAmelCase_) UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0) UpperCamelCase__ : Dict = Bounds(0 , self.regularization) UpperCamelCase__ : Any = minimize( UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x UpperCamelCase__ : str = l_star # calculating mean offset of separation plane to points UpperCamelCase__ : Any = 0 for i in range(UpperCAmelCase_): for j in range(UpperCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) UpperCamelCase__ : List[str] = s / n def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray): UpperCamelCase__ : Optional[int] = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , UpperCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
6
0
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __lowercase (__lowerCamelCase ): _lowerCamelCase = ['''image_processor''', '''tokenizer'''] _lowerCamelCase = '''OwlViTImageProcessor''' _lowerCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[int] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : List[Any]): UpperCamelCase__ : str = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCAmelCase_ , ) UpperCamelCase__ : str = kwargs.pop('feature_extractor') UpperCamelCase__ : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(UpperCAmelCase_ , UpperCAmelCase_) def __call__( self : Tuple , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Any]="max_length" , UpperCAmelCase_ : str="np" , **UpperCAmelCase_ : Optional[Any]): if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.') if text is not None: if isinstance(UpperCAmelCase_ , UpperCAmelCase_) or (isinstance(UpperCAmelCase_ , UpperCAmelCase_) and not isinstance(text[0] , UpperCAmelCase_)): UpperCamelCase__ : Union[str, Any] = [self.tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)] elif isinstance(UpperCAmelCase_ , UpperCAmelCase_) and isinstance(text[0] , UpperCAmelCase_): UpperCamelCase__ : Union[str, Any] = [] # Maximum number of queries across batch UpperCamelCase__ : Tuple = max([len(UpperCAmelCase_) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(UpperCAmelCase_) != max_num_queries: UpperCamelCase__ : List[Any] = t + [' '] * (max_num_queries - len(UpperCAmelCase_)) UpperCamelCase__ : str = self.tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) encodings.append(UpperCAmelCase_) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings') if return_tensors == "np": UpperCamelCase__ : List[str] = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0) UpperCamelCase__ : List[str] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp UpperCamelCase__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0) UpperCamelCase__ : List[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch UpperCamelCase__ : Dict = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0) UpperCamelCase__ : Union[str, Any] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf UpperCamelCase__ : List[str] = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0) UpperCamelCase__ : Union[str, Any] = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0) else: raise ValueError('Target return tensor type could not be returned') UpperCamelCase__ : Optional[Any] = BatchEncoding() UpperCamelCase__ : str = input_ids UpperCamelCase__ : Optional[Any] = attention_mask if query_images is not None: UpperCamelCase__ : Tuple = BatchEncoding() UpperCamelCase__ : Tuple = self.image_processor( UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_).pixel_values UpperCamelCase__ : Union[str, Any] = query_pixel_values if images is not None: UpperCamelCase__ : List[str] = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) if text is not None and images is not None: UpperCamelCase__ : Tuple = image_features.pixel_values return encoding elif query_images is not None and images is not None: UpperCamelCase__ : int = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_) def __UpperCamelCase ( self : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]): return self.image_processor.post_process(*UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any]): return self.image_processor.post_process_object_detection(*UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int): return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def __UpperCamelCase ( self : str): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase_ , ) return self.image_processor_class @property def __UpperCamelCase ( self : List[Any]): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase_ , ) return self.image_processor
704
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_) -> Any: UpperCamelCase__ : Dict = DPTConfig() if "large" in checkpoint_url: UpperCamelCase__ : List[str] = 1_024 UpperCamelCase__ : List[str] = 4_096 UpperCamelCase__ : Optional[int] = 24 UpperCamelCase__ : List[str] = 16 UpperCamelCase__ : List[str] = [5, 11, 17, 23] UpperCamelCase__ : str = [256, 512, 1_024, 1_024] UpperCamelCase__ : Union[str, Any] = (1, 384, 384) if "ade" in checkpoint_url: UpperCamelCase__ : int = True UpperCamelCase__ : Optional[Any] = 150 UpperCamelCase__ : int = 'huggingface/label-files' UpperCamelCase__ : List[Any] = 'ade20k-id2label.json' UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r')) UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()} UpperCamelCase__ : Union[str, Any] = idalabel UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()} UpperCamelCase__ : Any = [1, 150, 480, 480] return config, expected_shape def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder') if "pretrained.model" in name: UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings') if "patch_embed" in name: UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings') if "pos_embed" in name: UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings') if "attn.proj" in name: UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense') if "proj" in name and "project" not in name: UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection') if "blocks" in name: UpperCamelCase__ : int = name.replace('blocks' , 'layer') if "mlp.fc1" in name: UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense') if "mlp.fc2" in name: UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense') if "norm1" in name: UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before') if "norm2" in name: UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after') if "scratch.output_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head') if "scratch" in name: UpperCamelCase__ : int = name.replace('scratch' , 'neck') if "layer1_rn" in name: UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0') if "layer2_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1') if "layer3_rn" in name: UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2') if "layer4_rn" in name: UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3') if "refinenet" in name: UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}') if "out_conv" in name: UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection') if "resConfUnit1" in name: UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1') if "resConfUnit2" in name: UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2') if "conv1" in name: UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1') if "conv2" in name: UpperCamelCase__ : int = name.replace('conv2' , 'convolution2') # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0') if "pretrained.act_postprocess2.0.project.0" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0') if "pretrained.act_postprocess3.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0') if "pretrained.act_postprocess4.0.project.0" in name: UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0') # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection') if "pretrained.act_postprocess1.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize') if "pretrained.act_postprocess2.3" in name: UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection') if "pretrained.act_postprocess2.4" in name: UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize') if "pretrained.act_postprocess3.3" in name: UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection') if "pretrained.act_postprocess4.3" in name: UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection') if "pretrained.act_postprocess4.4" in name: UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize') if "pretrained" in name: UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt') if "bn" in name: UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm') if "head" in name: UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head') if "encoder.norm" in name: UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm') if "auxlayer" in name: UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head') return name def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any: for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight') UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias') # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :] UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size] UpperCamelCase__ : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw) return im @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_) # load original state_dict from URL UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu') # remove certain keys remove_ignore_keys_(lowerCamelCase_) # rename keys for key in state_dict.copy().keys(): UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_) UpperCamelCase__ : List[str] = val # read in qkv matrices read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_) # load HuggingFace model UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_) model.load_state_dict(lowerCamelCase_) model.eval() # Check outputs on an image UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384 UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_) UpperCamelCase__ : int = prepare_img() UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt') # forward pass UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth # Assert logits UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]]) if "ade" in checkpoint_url: UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]]) assert outputs.shape == torch.Size(lowerCamelCase_) assert ( torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_) ) Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase_) if push_to_hub: print('Pushing model to hub...') model.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) lowerCAmelCase__ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
6
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowercase (__lowerCamelCase , unittest.TestCase ): _lowerCamelCase = UnCLIPImageVariationPipeline _lowerCamelCase = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''} _lowerCamelCase = IMAGE_VARIATION_BATCH_PARAMS _lowerCamelCase = [ '''generator''', '''return_dict''', '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] _lowerCamelCase = False @property def __UpperCamelCase ( self : Tuple): return 32 @property def __UpperCamelCase ( self : Tuple): return 32 @property def __UpperCamelCase ( self : int): return self.time_input_dim @property def __UpperCamelCase ( self : Any): return self.time_input_dim * 4 @property def __UpperCamelCase ( self : List[Any]): return 100 @property def __UpperCamelCase ( self : Any): UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def __UpperCamelCase ( self : Union[str, Any]): torch.manual_seed(0) UpperCamelCase__ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(UpperCAmelCase_) @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : Tuple = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(UpperCAmelCase_) @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : Dict = { 'clip_embeddings_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'cross_attention_dim': self.cross_attention_dim, } UpperCamelCase__ : int = UnCLIPTextProjModel(**UpperCAmelCase_) return model @property def __UpperCamelCase ( self : Union[str, Any]): torch.manual_seed(0) UpperCamelCase__ : str = { 'sample_size': 32, # RGB in channels 'in_channels': 3, # Out channels is double in channels because predicts mean and variance 'out_channels': 6, 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': 'identity', } UpperCamelCase__ : Optional[Any] = UNetaDConditionModel(**UpperCAmelCase_) return model @property def __UpperCamelCase ( self : List[str]): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def __UpperCamelCase ( self : List[Any]): torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs) return model @property def __UpperCamelCase ( self : Optional[Any]): # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1) UpperCamelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs) return model def __UpperCamelCase ( self : int): UpperCamelCase__ : Optional[int] = self.dummy_decoder UpperCamelCase__ : Dict = self.dummy_text_proj UpperCamelCase__ : int = self.dummy_text_encoder UpperCamelCase__ : Tuple = self.dummy_tokenizer UpperCamelCase__ : Optional[Any] = self.dummy_super_res_first UpperCamelCase__ : List[str] = self.dummy_super_res_last UpperCamelCase__ : Optional[Any] = UnCLIPScheduler( variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , ) UpperCamelCase__ : Tuple = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , ) UpperCamelCase__ : List[Any] = CLIPImageProcessor(crop_size=32 , size=32) UpperCamelCase__ : Optional[Any] = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Dict=True): UpperCamelCase__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_)).to(UpperCAmelCase_) if str(UpperCAmelCase_).startswith('mps'): UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_) else: UpperCamelCase__ : str = torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_) if pil_image: UpperCamelCase__ : List[Any] = input_image * 0.5 + 0.5 UpperCamelCase__ : str = input_image.clamp(0 , 1) UpperCamelCase__ : Optional[Any] = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy() UpperCamelCase__ : int = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_)[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def __UpperCamelCase ( self : List[str]): UpperCamelCase__ : Any = 'cpu' UpperCamelCase__ : Optional[Any] = self.get_dummy_components() UpperCamelCase__ : Dict = self.pipeline_class(**UpperCAmelCase_) UpperCamelCase__ : int = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) UpperCamelCase__ : Any = pipe(**UpperCAmelCase_) UpperCamelCase__ : int = output.images UpperCamelCase__ : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) UpperCamelCase__ : List[str] = pipe( **UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : int = image[0, -3:, -3:, -1] UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : int = np.array( [ 0.99_97, 0.00_02, 0.99_97, 0.99_97, 0.99_69, 0.00_23, 0.99_97, 0.99_69, 0.99_70, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Tuple = 'cpu' UpperCamelCase__ : Dict = self.get_dummy_components() UpperCamelCase__ : Any = self.pipeline_class(**UpperCAmelCase_) UpperCamelCase__ : Any = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[str] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = pipe(**UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) UpperCamelCase__ : Dict = pipe( **UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : Dict = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : int = 'cpu' UpperCamelCase__ : int = self.get_dummy_components() UpperCamelCase__ : Tuple = self.pipeline_class(**UpperCAmelCase_) UpperCamelCase__ : Dict = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) UpperCamelCase__ : Dict = [ pipeline_inputs['image'], pipeline_inputs['image'], ] UpperCamelCase__ : int = pipe(**UpperCAmelCase_) UpperCamelCase__ : Optional[int] = output.images UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) UpperCamelCase__ : List[str] = [ tuple_pipeline_inputs['image'], tuple_pipeline_inputs['image'], ] UpperCamelCase__ : str = pipe( **UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : str = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) UpperCamelCase__ : Any = np.array( [ 0.99_97, 0.99_89, 0.00_08, 0.00_21, 0.99_60, 0.00_18, 0.00_14, 0.00_02, 0.99_33, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[Any] = torch.device('cpu') class __lowercase : _lowerCamelCase = 1 UpperCamelCase__ : List[Any] = self.get_dummy_components() UpperCamelCase__ : Dict = self.pipeline_class(**UpperCAmelCase_) UpperCamelCase__ : str = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Union[str, Any] = pipe.decoder.dtype UpperCamelCase__ : Optional[int] = 1 UpperCamelCase__ : Optional[Any] = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) UpperCamelCase__ : Optional[int] = pipe.prepare_latents( UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler()) UpperCamelCase__ : Any = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) UpperCamelCase__ : str = pipe.prepare_latents( UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler()) UpperCamelCase__ : List[str] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = pipe( **UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_).images UpperCamelCase__ : int = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_) # Don't pass image, instead pass embedding UpperCamelCase__ : Dict = pipeline_inputs.pop('image') UpperCamelCase__ : int = pipe.image_encoder(UpperCAmelCase_).image_embeds UpperCamelCase__ : int = pipe( **UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ , image_embeddings=UpperCAmelCase_ , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a).max() < 1e-4 @skip_mps def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[int] = torch_device == 'cpu' # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor UpperCamelCase__ : Optional[Any] = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=UpperCAmelCase_ , expected_max_diff=UpperCAmelCase_) @skip_mps def __UpperCamelCase ( self : str): UpperCamelCase__ : List[Any] = torch_device == 'cpu' UpperCamelCase__ : Dict = True UpperCamelCase__ : Optional[int] = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] self._test_inference_batch_single_identical( test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , ) def __UpperCamelCase ( self : Any): UpperCamelCase__ : List[Any] = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes UpperCamelCase__ : Tuple = [2, 3] self._test_inference_batch_consistent( batch_sizes=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=UpperCAmelCase_) @skip_mps def __UpperCamelCase ( self : List[Any]): return super().test_dict_tuple_outputs_equivalent() @skip_mps def __UpperCamelCase ( self : str): return super().test_save_load_local() @skip_mps def __UpperCamelCase ( self : Optional[Any]): return super().test_save_load_optional_components() @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png') UpperCamelCase__ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/unclip/karlo_v1_alpha_cat_variation_fp16.npy') UpperCamelCase__ : Any = UnCLIPImageVariationPipeline.from_pretrained( 'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa) UpperCamelCase__ : Dict = pipeline.to(UpperCAmelCase_) pipeline.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = torch.Generator(device='cpu').manual_seed(0) UpperCamelCase__ : Optional[int] = pipeline( UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type='np' , ) UpperCamelCase__ : List[Any] = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ , 15)
705
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ): UpperCamelCase__ : Tuple = parent UpperCamelCase__ : List[str] = batch_size UpperCamelCase__ : Optional[Any] = image_size UpperCamelCase__ : Optional[Any] = patch_size UpperCamelCase__ : List[str] = num_channels UpperCamelCase__ : Union[str, Any] = is_training UpperCamelCase__ : int = use_labels UpperCamelCase__ : Optional[int] = hidden_size UpperCamelCase__ : Any = num_hidden_layers UpperCamelCase__ : str = num_attention_heads UpperCamelCase__ : str = intermediate_size UpperCamelCase__ : Union[str, Any] = hidden_act UpperCamelCase__ : Optional[int] = hidden_dropout_prob UpperCamelCase__ : Tuple = attention_probs_dropout_prob UpperCamelCase__ : Any = type_sequence_label_size UpperCamelCase__ : int = initializer_range UpperCamelCase__ : Optional[int] = mask_ratio UpperCamelCase__ : int = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase__ : str = (image_size // patch_size) ** 2 UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCamelCase__ : List[str] = None if self.use_labels: UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCamelCase__ : Any = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : List[Any]): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]): UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple): UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : Dict = model(UpperCAmelCase_) UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2 UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) # test greyscale images UpperCamelCase__ : List[Any] = 1 UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels)) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[str] = self.prepare_config_and_inputs() UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs UpperCamelCase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): _lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () _lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : List[str] = ViTMAEModelTester(self) UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37) def __UpperCamelCase ( self : Any): self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds') def __UpperCamelCase ( self : Tuple): pass def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) UpperCamelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear)) def __UpperCamelCase ( self : List[str]): UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_) UpperCamelCase__ : int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase__ : Any = [*signature.parameters.keys()] UpperCamelCase__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_) def __UpperCamelCase ( self : str): UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]): # make masks reproducible np.random.seed(2) UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2) UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase__ : List[str] = pt_noise super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) UpperCamelCase__ : Dict = outputs[0].cpu().numpy() UpperCamelCase__ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_) model.to(UpperCAmelCase_) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) # Make sure we don't have nans UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy() UpperCamelCase__ : Any = 0 UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(UpperCAmelCase_ , 1e-5) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Tuple): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Optional[int]): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def __UpperCamelCase ( self : Tuple): pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load') def __UpperCamelCase ( self : Tuple): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def __UpperCamelCase ( self : Optional[int]): pass @slow def __UpperCamelCase ( self : Optional[Any]): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) def __UpperCAmelCase ( ) -> Optional[Any]: UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_torch @require_vision class __lowercase (unittest.TestCase ): @cached_property def __UpperCamelCase ( self : int): return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None @slow def __UpperCamelCase ( self : str): # make random mask reproducible across the PT and TF model np.random.seed(2) UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_) UpperCamelCase__ : Tuple = self.default_image_processor UpperCamelCase__ : Dict = prepare_img() UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase__ : Union[str, Any] = ViTMAEConfig() UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches)) # forward pass with torch.no_grad(): UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_)) # verify the logits UpperCamelCase__ : Tuple = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape , UpperCAmelCase_) UpperCamelCase__ : Any = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]]) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
6
0
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowerCAmelCase__ = logging.getLogger(__name__) torch.set_grad_enabled(False) lowerCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=100 , lowerCamelCase_=" ") -> List[str]: UpperCamelCase__ : Optional[int] = text.split(lowerCamelCase_) return [character.join(text[i : i + n]).strip() for i in range(0 , len(lowerCamelCase_) , lowerCamelCase_)] def __UpperCAmelCase ( lowerCamelCase_) -> dict: UpperCamelCase__ : Any = [], [] for title, text in zip(documents['title'] , documents['text']): if text is not None: for passage in split_text(lowerCamelCase_): titles.append(title if title is not None else '') texts.append(lowerCamelCase_) return {"title": titles, "text": texts} def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> dict: UpperCamelCase__ : str = ctx_tokenizer( documents['title'] , documents['text'] , truncation=lowerCamelCase_ , padding='longest' , return_tensors='pt')['input_ids'] UpperCamelCase__ : Dict = ctx_encoder(input_ids.to(device=lowerCamelCase_) , return_dict=lowerCamelCase_).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> int: ###################################### logger.info('Step 1 - Create the dataset') ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCamelCase__ : Union[str, Any] = load_dataset( 'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text']) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCamelCase__ : str = dataset.map(lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=processing_args.num_proc) # And compute the embeddings UpperCamelCase__ : Optional[int] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=lowerCamelCase_) UpperCamelCase__ : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) UpperCamelCase__ : str = Features( {'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))}) # optional, save as float32 instead of float64 to save space UpperCamelCase__ : List[Any] = dataset.map( partial(lowerCamelCase_ , ctx_encoder=lowerCamelCase_ , ctx_tokenizer=lowerCamelCase_) , batched=lowerCamelCase_ , batch_size=processing_args.batch_size , features=lowerCamelCase_ , ) # And finally save your dataset UpperCamelCase__ : Optional[int] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset') dataset.save_to_disk(lowerCamelCase_) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('Step 2 - Index the dataset') ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCamelCase__ : List[Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index('embeddings' , custom_index=lowerCamelCase_) # And save the index UpperCamelCase__ : int = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss') dataset.get_index('embeddings').save(lowerCamelCase_) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __lowercase : _lowerCamelCase = field( default=str(Path(__lowerCamelCase ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , ) _lowerCamelCase = field( default=__lowerCamelCase , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , ) _lowerCamelCase = field( default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , ) _lowerCamelCase = field( default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={ '''help''': ( '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or''' ''' \'facebook/dpr-ctx_encoder-multiset-base\'''' ) } , ) _lowerCamelCase = field( default=str(Path(__lowerCamelCase ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , ) @dataclass class __lowercase : _lowerCamelCase = field( default=__lowerCamelCase , metadata={ '''help''': '''The number of processes to use to split the documents into passages. Default is single process.''' } , ) _lowerCamelCase = field( default=16 , metadata={ '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.''' } , ) @dataclass class __lowercase : _lowerCamelCase = field( default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , ) _lowerCamelCase = field( default=128 , metadata={ '''help''': ( '''The number of bi-directional links created for every new element during the HNSW index construction.''' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowerCAmelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
706
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowercase (metaclass=__lowerCamelCase ): _lowerCamelCase = ['''torch''', '''scipy'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int): requires_backends(self , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]): requires_backends(cls , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any): requires_backends(cls , ['torch', 'scipy'])
6
0
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]: UpperCamelCase__ : Optional[int] = WavaVecaForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_) UpperCamelCase__ : str = downstream_dict['projector.weight'] UpperCamelCase__ : Dict = downstream_dict['projector.bias'] UpperCamelCase__ : Optional[Any] = downstream_dict['model.post_net.linear.weight'] UpperCamelCase__ : Optional[int] = downstream_dict['model.post_net.linear.bias'] return model def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]: UpperCamelCase__ : Optional[Any] = WavaVecaForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_) UpperCamelCase__ : Tuple = downstream_dict['model.linear.weight'] UpperCamelCase__ : List[str] = downstream_dict['model.linear.bias'] return model def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]: UpperCamelCase__ : Optional[Any] = WavaVecaForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_) UpperCamelCase__ : str = downstream_dict['connector.weight'] UpperCamelCase__ : Any = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel): UpperCamelCase__ : int = downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] UpperCamelCase__ : List[Any] = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] UpperCamelCase__ : Any = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] UpperCamelCase__ : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] UpperCamelCase__ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] UpperCamelCase__ : List[str] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] UpperCamelCase__ : str = downstream_dict['objective.W'] return model @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase__ : Optional[int] = torch.load(lowerCamelCase_ , map_location='cpu') UpperCamelCase__ : Optional[int] = checkpoint['Downstream'] UpperCamelCase__ : int = WavaVecaConfig.from_pretrained(lowerCamelCase_) UpperCamelCase__ : str = WavaVecaFeatureExtractor.from_pretrained( lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_) UpperCamelCase__ : List[str] = hf_config.architectures[0] if arch.endswith('ForSequenceClassification'): UpperCamelCase__ : int = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) elif arch.endswith('ForAudioFrameClassification'): UpperCamelCase__ : Union[str, Any] = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) elif arch.endswith('ForXVector'): UpperCamelCase__ : Optional[int] = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}') if hf_config.use_weighted_layer_sum: UpperCamelCase__ : Optional[int] = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(lowerCamelCase_) hf_model.save_pretrained(lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') lowerCAmelCase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
707
'''simple docstring''' class __lowercase : def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False): # Mapping from the first character of the prefix of the node UpperCamelCase__ : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word UpperCamelCase__ : List[Any] = is_leaf UpperCamelCase__ : Optional[Any] = prefix def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[int] = 0 for q, w in zip(self.prefix , UpperCAmelCase_): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]): for word in words: self.insert(UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str): # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: UpperCamelCase__ : Optional[Any] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_) else: UpperCamelCase__ : int = self.nodes[word[0]] UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match( UpperCAmelCase_) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(UpperCAmelCase_) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: UpperCamelCase__ : Tuple = remaining_prefix UpperCamelCase__ : str = self.nodes[matching_string[0]] UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : str = aux_node if remaining_word == "": UpperCamelCase__ : int = True else: self.nodes[matching_string[0]].insert(UpperCAmelCase_) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_) if not incoming_node: return False else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match( UpperCAmelCase_) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(UpperCAmelCase_) def __UpperCamelCase ( self : str , UpperCAmelCase_ : str): UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_) if not incoming_node: return False else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match( UpperCAmelCase_) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(UpperCAmelCase_) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes) == 1 and not self.is_leaf: UpperCamelCase__ : List[str] = list(self.nodes.values())[0] UpperCamelCase__ : Tuple = merging_node.is_leaf self.prefix += merging_node.prefix UpperCamelCase__ : Tuple = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes) > 1: UpperCamelCase__ : str = False # If there is 1 edge, we merge it with its child else: UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0] UpperCamelCase__ : Optional[Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix UpperCamelCase__ : Union[str, Any] = merging_node.nodes return True def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0): if self.prefix != "": print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '') for value in self.nodes.values(): value.print_tree(height + 1) def __UpperCAmelCase ( ) -> bool: UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split() UpperCamelCase__ : List[Any] = RadixNode() root.insert_many(lowerCamelCase_) assert all(root.find(lowerCamelCase_) for word in words) assert not root.find('bandanas') assert not root.find('apps') root.delete('all') assert not root.find('all') root.delete('banana') assert not root.find('banana') assert root.find('bananas') return True def __UpperCAmelCase ( ) -> None: assert test_trie() def __UpperCAmelCase ( ) -> None: UpperCamelCase__ : List[Any] = RadixNode() UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split() root.insert_many(lowerCamelCase_) print('Words:' , lowerCamelCase_) print('Tree:') root.print_tree() if __name__ == "__main__": main()
6
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } lowerCAmelCase__ = { 'facebook/mbart-large-en-ro': 1024, 'facebook/mbart-large-cc25': 1024, } # fmt: off lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = ['''input_ids''', '''attention_mask'''] _lowerCamelCase = MBartTokenizer _lowerCamelCase = [] _lowerCamelCase = [] def __init__( self : int , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]="<s>" , UpperCAmelCase_ : Union[str, Any]="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Tuple="<unk>" , UpperCAmelCase_ : Dict="<pad>" , UpperCAmelCase_ : Any="<mask>" , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token super().__init__( vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) UpperCamelCase__ : Union[str, Any] = vocab_file UpperCamelCase__ : Dict = False if not self.vocab_file else True UpperCamelCase__ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens]) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens}) UpperCamelCase__ : Optional[Any] = { lang_code: self.convert_tokens_to_ids(UpperCAmelCase_) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCamelCase__ : Tuple = src_lang if src_lang is not None else 'en_XX' UpperCamelCase__ : Dict = self.convert_tokens_to_ids(self._src_lang) UpperCamelCase__ : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def __UpperCamelCase ( self : Any): return self._src_lang @src_lang.setter def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str): UpperCamelCase__ : List[str] = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase ( self : int , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : Any = [self.sep_token_id] UpperCamelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] , UpperCAmelCase_ : Optional[str] , **UpperCAmelCase_ : Optional[int]): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') UpperCamelCase__ : int = src_lang UpperCamelCase__ : Tuple = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_) UpperCamelCase__ : Dict = self.convert_tokens_to_ids(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = tgt_lang_id return inputs def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str = "en_XX" , UpperCAmelCase_ : Optional[List[str]] = None , UpperCAmelCase_ : str = "ro_RO" , **UpperCAmelCase_ : Dict , ): UpperCamelCase__ : List[Any] = src_lang UpperCamelCase__ : Tuple = tgt_lang return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : List[str]): return self.set_src_lang_special_tokens(self.src_lang) def __UpperCamelCase ( self : Optional[Any]): return self.set_tgt_lang_special_tokens(self.tgt_lang) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Any): UpperCamelCase__ : Any = self.convert_tokens_to_ids(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : str = [self.eos_token_id, self.cur_lang_code] UpperCamelCase__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens) UpperCamelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens) UpperCamelCase__ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str): UpperCamelCase__ : Tuple = self.convert_tokens_to_ids(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = [] UpperCamelCase__ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] UpperCamelCase__ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens) UpperCamelCase__ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens) UpperCamelCase__ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , ) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(UpperCAmelCase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory.') return UpperCamelCase__ : str = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_): copyfile(self.vocab_file , UpperCAmelCase_) return (out_vocab_file,)
708
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase__ = False class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : int): return 12 @property def __UpperCamelCase ( self : Tuple): return 12 @property def __UpperCamelCase ( self : Dict): return 32 @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def __UpperCamelCase ( self : List[str]): torch.manual_seed(0) UpperCamelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = 12 UpperCamelCase__ : Dict = 12 UpperCamelCase__ : Union[str, Any] = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_) return model def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = 'cpu' UpperCamelCase__ : List[str] = self.dummy_vqvae UpperCamelCase__ : List[str] = self.dummy_text_encoder UpperCamelCase__ : Optional[int] = self.dummy_tokenizer UpperCamelCase__ : List[str] = self.dummy_transformer UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_) UpperCamelCase__ : int = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Optional[int] = 'cpu' UpperCamelCase__ : str = self.dummy_vqvae UpperCamelCase__ : Any = self.dummy_text_encoder UpperCamelCase__ : List[Any] = self.dummy_tokenizer UpperCamelCase__ : Dict = self.dummy_transformer UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length) UpperCamelCase__ : str = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : str = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : int = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Any): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy') UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq') UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_) pipeline.set_progress_bar_config(disable=UpperCAmelCase_) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : int = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , ) UpperCamelCase__ : int = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 2.0
6
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'], 'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['VisionTextDualEncoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['FlaxVisionTextDualEncoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['TFVisionTextDualEncoderModel'] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
709
'''simple docstring''' import numpy as np from PIL import Image def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : str = 0 # compute the shape of the output matrix UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape)) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size]) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[int] = 0 return updated_arr def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray: UpperCamelCase__ : Tuple = np.array(lowerCamelCase_) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix') UpperCamelCase__ : Optional[int] = 0 UpperCamelCase__ : int = 0 UpperCamelCase__ : List[str] = 0 UpperCamelCase__ : List[Any] = 0 # compute the shape of the output matrix UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape)) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size])) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 UpperCamelCase__ : Union[str, Any] = 0 UpperCamelCase__ : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='avgpooling', verbose=True) # Loading the image lowerCAmelCase__ = Image.open('path_to_image') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
6
0
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def __UpperCAmelCase ( lowerCamelCase_) -> List[str]: UpperCamelCase__ : Optional[Any] = model.config UpperCamelCase__ : Optional[Any] = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) UpperCamelCase__ : Optional[Any] = MBartConfig( is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , add_cross_attention=lowerCamelCase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer) , scale_embedding=lowerCamelCase_ , add_final_layer_norm=lowerCamelCase_ , ) return encoder_config, decoder_config def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]: if "encoder.model" in name: UpperCamelCase__ : Any = name.replace('encoder.model' , 'encoder') if "decoder.model" in name: UpperCamelCase__ : str = name.replace('decoder.model' , 'decoder') if "patch_embed.proj" in name: UpperCamelCase__ : List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection') if "patch_embed.norm" in name: UpperCamelCase__ : int = name.replace('patch_embed.norm' , 'embeddings.norm') if name.startswith('encoder'): if "layers" in name: UpperCamelCase__ : int = 'encoder.' + name if "attn.proj" in name: UpperCamelCase__ : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense') if "attn" in name and "mask" not in name: UpperCamelCase__ : Any = name.replace('attn' , 'attention.self') if "norm1" in name: UpperCamelCase__ : Tuple = name.replace('norm1' , 'layernorm_before') if "norm2" in name: UpperCamelCase__ : Union[str, Any] = name.replace('norm2' , 'layernorm_after') if "mlp.fc1" in name: UpperCamelCase__ : List[str] = name.replace('mlp.fc1' , 'intermediate.dense') if "mlp.fc2" in name: UpperCamelCase__ : Union[str, Any] = name.replace('mlp.fc2' , 'output.dense') if name == "encoder.norm.weight": UpperCamelCase__ : Optional[int] = 'encoder.layernorm.weight' if name == "encoder.norm.bias": UpperCamelCase__ : Tuple = 'encoder.layernorm.bias' return name def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> str: for key in orig_state_dict.copy().keys(): UpperCamelCase__ : List[str] = orig_state_dict.pop(lowerCamelCase_) if "qkv" in key: UpperCamelCase__ : List[Any] = key.split('.') UpperCamelCase__ : Optional[Any] = int(key_split[3]) UpperCamelCase__ : Any = int(key_split[5]) UpperCamelCase__ : List[str] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCamelCase__ : Any = val[:dim, :] UpperCamelCase__ : List[Any] = val[dim : dim * 2, :] UpperCamelCase__ : Optional[Any] = val[-dim:, :] else: UpperCamelCase__ : Dict = val[:dim] UpperCamelCase__ : List[str] = val[dim : dim * 2] UpperCamelCase__ : str = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: UpperCamelCase__ : Tuple = val return orig_state_dict def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False) -> Dict: # load original model UpperCamelCase__ : str = DonutModel.from_pretrained(lowerCamelCase_).eval() # load HuggingFace model UpperCamelCase__ : Optional[Any] = get_configs(lowerCamelCase_) UpperCamelCase__ : int = DonutSwinModel(lowerCamelCase_) UpperCamelCase__ : str = MBartForCausalLM(lowerCamelCase_) UpperCamelCase__ : List[Any] = VisionEncoderDecoderModel(encoder=lowerCamelCase_ , decoder=lowerCamelCase_) model.eval() UpperCamelCase__ : List[str] = original_model.state_dict() UpperCamelCase__ : str = convert_state_dict(lowerCamelCase_ , lowerCamelCase_) model.load_state_dict(lowerCamelCase_) # verify results on scanned document UpperCamelCase__ : Dict = load_dataset('hf-internal-testing/example-documents') UpperCamelCase__ : int = dataset['test'][0]['image'].convert('RGB') UpperCamelCase__ : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase_ , from_slow=lowerCamelCase_) UpperCamelCase__ : int = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1]) UpperCamelCase__ : Any = DonutProcessor(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : Union[str, Any] = processor(lowerCamelCase_ , return_tensors='pt').pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": UpperCamelCase__ : int = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' UpperCamelCase__ : List[str] = 'When is the coffee break?' UpperCamelCase__ : Optional[int] = task_prompt.replace('{user_input}' , lowerCamelCase_) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": UpperCamelCase__ : Any = '<s_rvlcdip>' elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: UpperCamelCase__ : Dict = '<s_cord>' elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": UpperCamelCase__ : Optional[int] = 's_cord-v2>' elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": UpperCamelCase__ : Optional[int] = '<s_zhtrainticket>' elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt UpperCamelCase__ : Union[str, Any] = 'hello world' else: raise ValueError('Model name not supported') UpperCamelCase__ : Optional[int] = original_model.decoder.tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors='pt')[ 'input_ids' ] UpperCamelCase__ : Union[str, Any] = original_model.encoder.model.patch_embed(lowerCamelCase_) UpperCamelCase__ : Any = model.encoder.embeddings(lowerCamelCase_) assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3) # verify encoder hidden states UpperCamelCase__ : int = original_model.encoder(lowerCamelCase_) UpperCamelCase__ : Tuple = model.encoder(lowerCamelCase_).last_hidden_state assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2) # verify decoder hidden states UpperCamelCase__ : Optional[int] = original_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_).logits UpperCamelCase__ : Optional[int] = model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_).logits assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3) print('Looks ok!') if pytorch_dump_folder_path is not None: print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) processor.save_pretrained(lowerCamelCase_) if push_to_hub: model.push_to_hub('nielsr/' + model_name.split('/')[-1] , commit_message='Update model') processor.push_to_hub('nielsr/' + model_name.split('/')[-1] , commit_message='Update model') if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help='Name of the original model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.', ) lowerCAmelCase__ = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
710
'''simple docstring''' from __future__ import annotations class __lowercase : def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]): UpperCamelCase__ : int = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.') if len(UpperCAmelCase_) != 0: UpperCamelCase__ : str = len(rows[0]) if cols == 0: raise error for row in rows: if len(UpperCAmelCase_) != cols: raise error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise error UpperCamelCase__ : Optional[int] = rows else: UpperCamelCase__ : Optional[Any] = [] def __UpperCamelCase ( self : Union[str, Any]): return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] @property def __UpperCamelCase ( self : Dict): return len(self.rows) @property def __UpperCamelCase ( self : Tuple): return len(self.rows[0]) @property def __UpperCamelCase ( self : List[Any]): return (self.num_rows, self.num_columns) @property def __UpperCamelCase ( self : Any): return self.order[0] == self.order[1] def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[int] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] for row_num in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : Dict): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0]) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns)) def __UpperCamelCase ( self : str): return bool(self.determinant()) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int): UpperCamelCase__ : Optional[Any] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns) if other_column != column ] for other_row in range(self.num_rows) if other_row != row ] return Matrix(UpperCAmelCase_).determinant() def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int): if (row + column) % 2 == 0: return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : List[Any]): return Matrix( [ [self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)] for row in range(self.num_rows) ]) def __UpperCamelCase ( self : Optional[int]): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns) ] for row in range(self.minors().num_rows) ]) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = [ [self.cofactors().rows[column][row] for column in range(self.num_columns)] for row in range(self.num_rows) ] return Matrix(UpperCAmelCase_) def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse') return self.adjugate() * (1 / determinant) def __repr__( self : Any): return str(self.rows) def __str__( self : List[Any]): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0])) + "]]" return ( "[" + "\n ".join( [ '[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]' for row in self.rows ]) + "]" ) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in row: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix') if position is None: self.rows.append(UpperCAmelCase_) else: UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:] def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None): UpperCamelCase__ : int = TypeError( 'Column must be a list containing all ints and/or floats') if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise type_error for value in column: if not isinstance(UpperCAmelCase_ , (int, float)): raise type_error if len(UpperCAmelCase_) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix') if position is None: UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)] else: UpperCamelCase__ : str = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows) ] def __eq__( self : List[Any] , UpperCAmelCase_ : object): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): return NotImplemented return self.rows == other.rows def __ne__( self : Any , UpperCAmelCase_ : object): return not self == other def __neg__( self : Union[str, Any]): return self * -1 def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Addition requires matrices of the same order') return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __sub__( self : Tuple , UpperCAmelCase_ : Matrix): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order') return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float): if isinstance(UpperCAmelCase_ , (int, float)): return Matrix( [[int(element * other) for element in row] for row in self.rows]) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second') return Matrix( [ [Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()] for row in self.rows ]) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix') def __pow__( self : Dict , UpperCAmelCase_ : int): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_): raise TypeError('A Matrix can only be raised to the power of an int') if not self.is_square: raise ValueError('Only square matrices can be raised to a power') if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power') UpperCamelCase__ : str = self for _ in range(other - 1): result *= self return result @classmethod def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]): return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_))) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' import math def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(lowerCamelCase_) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen') if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowerCAmelCase__ = 'Enter the base and the power separated by a comma: ' lowerCAmelCase__ , lowerCAmelCase__ = map(int, input(prompt).split(',')) lowerCAmelCase__ , lowerCAmelCase__ = map(int, input(prompt).split(',')) # We find the log of each number, using the function res(), which takes two # arguments. lowerCAmelCase__ = res(xa, ya) lowerCAmelCase__ = res(xa, ya) # We check for the largest number if resa > resa: print('Largest number is', xa, '^', ya) elif resa > resa: print('Largest number is', xa, '^', ya) else: print('Both are equal')
711
'''simple docstring''' import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowercase : def __UpperCamelCase ( self : Union[str, Any]): torch.manual_seed(0) UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : List[str] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Dict): torch.manual_seed(0) UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5') torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ 'ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D', ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) UpperCamelCase__ : str = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , ) torch.manual_seed(0) UpperCamelCase__ : List[str] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0) UpperCamelCase__ : Optional[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __UpperCamelCase ( self : Any): UpperCamelCase__ : Dict = self.get_dummy_components() UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = inputs['prompt'] UpperCamelCase__ : List[Any] = inputs['generator'] UpperCamelCase__ : Tuple = inputs['num_inference_steps'] UpperCamelCase__ : List[Any] = inputs['output_type'] if "image" in inputs: UpperCamelCase__ : Tuple = inputs['image'] else: UpperCamelCase__ : Union[str, Any] = None if "mask_image" in inputs: UpperCamelCase__ : Optional[int] = inputs['mask_image'] else: UpperCamelCase__ : int = None if "original_image" in inputs: UpperCamelCase__ : List[Any] = inputs['original_image'] else: UpperCamelCase__ : Optional[Any] = None UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_) # inputs with prompt converted to embeddings UpperCamelCase__ : List[Any] = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Dict = image if mask_image is not None: UpperCamelCase__ : Optional[int] = mask_image if original_image is not None: UpperCamelCase__ : Union[str, Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , ) UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = inputs['generator'] UpperCamelCase__ : List[Any] = inputs['num_inference_steps'] UpperCamelCase__ : Optional[int] = inputs['output_type'] # inputs with prompt converted to embeddings UpperCamelCase__ : Any = { 'prompt_embeds': prompt_embeds, 'negative_prompt_embeds': negative_prompt_embeds, 'generator': generator, 'num_inference_steps': num_inference_steps, 'output_type': output_type, } if image is not None: UpperCamelCase__ : Tuple = image if mask_image is not None: UpperCamelCase__ : Union[str, Any] = mask_image if original_image is not None: UpperCamelCase__ : str = original_image UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4) def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Any = self.get_dummy_components() UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_) pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_) pipe_loaded.to(UpperCAmelCase_) pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_) UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0] UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max() self.assertLess(UpperCAmelCase_ , 1e-4)
6
0
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCAmelCase__ = logging.get_logger(__name__) class __lowercase (__lowerCamelCase ): _lowerCamelCase = '''linear''' _lowerCamelCase = '''cosine''' _lowerCamelCase = '''cosine_with_restarts''' _lowerCamelCase = '''polynomial''' _lowerCamelCase = '''constant''' _lowerCamelCase = '''constant_with_warmup''' _lowerCamelCase = '''piecewise_constant''' def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = -1): return LambdaLR(lowerCamelCase_ , lambda lowerCamelCase_: 1 , last_epoch=lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = -1): def lr_lambda(lowerCamelCase_): if current_step < num_warmup_steps: return float(lowerCamelCase_) / float(max(1.0 , lowerCamelCase_)) return 1.0 return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , last_epoch=lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = -1): UpperCamelCase__ : Union[str, Any] = {} UpperCamelCase__ : List[str] = step_rules.split(',') for rule_str in rule_list[:-1]: UpperCamelCase__ : str = rule_str.split(':') UpperCamelCase__ : Optional[int] = int(lowerCamelCase_) UpperCamelCase__ : Optional[int] = float(lowerCamelCase_) UpperCamelCase__ : Optional[int] = value UpperCamelCase__ : str = float(rule_list[-1]) def create_rules_function(lowerCamelCase_ , lowerCamelCase_): def rule_func(lowerCamelCase_) -> float: UpperCamelCase__ : Dict = sorted(rules_dict.keys()) for i, sorted_step in enumerate(lowerCamelCase_): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func UpperCamelCase__ : str = create_rules_function(lowerCamelCase_ , lowerCamelCase_) return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , last_epoch=lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=-1): def lr_lambda(lowerCamelCase_): if current_step < num_warmup_steps: return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_)) return max( 0.0 , float(num_training_steps - current_step) / float(max(1 , num_training_steps - num_warmup_steps))) return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0.5 , lowerCamelCase_ = -1): def lr_lambda(lowerCamelCase_): if current_step < num_warmup_steps: return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_)) UpperCamelCase__ : List[Any] = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps)) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase_) * 2.0 * progress))) return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = -1): def lr_lambda(lowerCamelCase_): if current_step < num_warmup_steps: return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_)) UpperCamelCase__ : str = float(current_step - num_warmup_steps) / float(max(1 , num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase_) * progress) % 1.0)))) return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=1e-7 , lowerCamelCase_=1.0 , lowerCamelCase_=-1): UpperCamelCase__ : Dict = optimizer.defaults['lr'] if not (lr_init > lr_end): raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})') def lr_lambda(lowerCamelCase_): if current_step < num_warmup_steps: return float(lowerCamelCase_) / float(max(1 , lowerCamelCase_)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: UpperCamelCase__ : Union[str, Any] = lr_init - lr_end UpperCamelCase__ : int = num_training_steps - num_warmup_steps UpperCamelCase__ : str = 1 - (current_step - num_warmup_steps) / decay_steps UpperCamelCase__ : Dict = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) lowerCAmelCase__ = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = 1 , lowerCamelCase_ = 1.0 , lowerCamelCase_ = -1 , ): UpperCamelCase__ : Union[str, Any] = SchedulerType(lowerCamelCase_) UpperCamelCase__ : Tuple = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowerCamelCase_ , last_epoch=lowerCamelCase_) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowerCamelCase_ , step_rules=lowerCamelCase_ , last_epoch=lowerCamelCase_) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.') if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , last_epoch=lowerCamelCase_) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.') if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , num_training_steps=lowerCamelCase_ , num_cycles=lowerCamelCase_ , last_epoch=lowerCamelCase_ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , num_training_steps=lowerCamelCase_ , power=lowerCamelCase_ , last_epoch=lowerCamelCase_ , ) return schedule_func( lowerCamelCase_ , num_warmup_steps=lowerCamelCase_ , num_training_steps=lowerCamelCase_ , last_epoch=lowerCamelCase_)
712
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCAmelCase__ = 3 def __UpperCAmelCase ( lowerCamelCase_) -> int: print('Generating primitive root of p') while True: UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_) if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1: continue if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1: continue return g def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print('Generating prime p...') UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number. UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p. UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety. UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_) UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p) UpperCamelCase__ : Optional[Any] = (key_size, d) return public_key, private_key def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None: if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'): print('\nWARNING:') print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' 'Use a different name or delete these files and re-run this program.') sys.exit() UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_) print(f'\nWriting public key to file {name}_pubkey.txt...') with open(f'{name}_pubkey.txt' , 'w') as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}') print(f'Writing private key to file {name}_privkey.txt...') with open(f'{name}_privkey.txt' , 'w') as fo: fo.write(f'{private_key[0]},{private_key[1]}') def __UpperCAmelCase ( ) -> None: print('Making key files...') make_key_files('elgamal' , 2_048) print('Key files generation successful') if __name__ == "__main__": main()
6
0
'''simple docstring''' import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def __UpperCAmelCase ( lowerCamelCase_) -> Any: return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device) def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]: UpperCamelCase__ : Optional[int] = create_tensor(lowerCamelCase_) UpperCamelCase__ : List[Any] = gather(lowerCamelCase_) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1)) def __UpperCAmelCase ( lowerCamelCase_) -> Dict: UpperCamelCase__ : Dict = [state.process_index] UpperCamelCase__ : Any = gather_object(lowerCamelCase_) assert len(lowerCamelCase_) == state.num_processes, f'{gathered_obj}, {len(lowerCamelCase_)} != {state.num_processes}' assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}' def __UpperCAmelCase ( lowerCamelCase_) -> int: UpperCamelCase__ : Dict = create_tensor(lowerCamelCase_) UpperCamelCase__ : Optional[Any] = broadcast(lowerCamelCase_) assert broadcasted_tensor.shape == torch.Size([state.num_processes]) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1)) def __UpperCAmelCase ( lowerCamelCase_) -> str: # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: UpperCamelCase__ : Optional[Any] = torch.arange(state.num_processes + 1).to(state.device) else: UpperCamelCase__ : Any = torch.arange(state.num_processes).to(state.device) UpperCamelCase__ : int = pad_across_processes(lowerCamelCase_) assert padded_tensor.shape == torch.Size([state.num_processes + 1]) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes)) + [0] def __UpperCAmelCase ( lowerCamelCase_) -> int: # For now runs on only two processes if state.num_processes != 2: return UpperCamelCase__ : Dict = create_tensor(lowerCamelCase_) UpperCamelCase__ : int = reduce(lowerCamelCase_ , 'sum') UpperCamelCase__ : Tuple = torch.tensor([4.0, 6]).to(state.device) assert torch.allclose(lowerCamelCase_ , lowerCamelCase_), f'{reduced_tensor} != {truth_tensor}' def __UpperCAmelCase ( lowerCamelCase_) -> Any: # For now runs on only two processes if state.num_processes != 2: return UpperCamelCase__ : List[str] = create_tensor(lowerCamelCase_) UpperCamelCase__ : List[str] = reduce(lowerCamelCase_ , 'mean') UpperCamelCase__ : List[Any] = torch.tensor([2.0, 3]).to(state.device) assert torch.allclose(lowerCamelCase_ , lowerCamelCase_), f'{reduced_tensor} != {truth_tensor}' def __UpperCAmelCase ( lowerCamelCase_) -> Any: # For xla_spawn (TPUs) main() def __UpperCAmelCase ( ) -> Union[str, Any]: UpperCamelCase__ : Any = PartialState() state.print(f'State: {state}') state.print('testing gather') test_gather(lowerCamelCase_) state.print('testing gather_object') test_gather_object(lowerCamelCase_) state.print('testing broadcast') test_broadcast(lowerCamelCase_) state.print('testing pad_across_processes') test_pad_across_processes(lowerCamelCase_) state.print('testing reduce_sum') test_reduce_sum(lowerCamelCase_) state.print('testing reduce_mean') test_reduce_mean(lowerCamelCase_) if __name__ == "__main__": main()
713
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } lowerCAmelCase__ = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str: for attribute in key.split('.'): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models UpperCamelCase__ : str = 'lm_head' UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_) if weight_type is not None: UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape else: UpperCamelCase__ : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": UpperCamelCase__ : Optional[Any] = value elif weight_type == "weight_g": UpperCamelCase__ : Union[str, Any] = value elif weight_type == "weight_v": UpperCamelCase__ : List[Any] = value elif weight_type == "bias": UpperCamelCase__ : Any = value else: UpperCamelCase__ : Optional[int] = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]: UpperCamelCase__ : List[Any] = [] UpperCamelCase__ : int = fairseq_model.state_dict() UpperCamelCase__ : int = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase__ : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase__ : List[Any] = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]: UpperCamelCase__ : Any = True if "*" in mapped_key: UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2] UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_) if "weight_g" in name: UpperCamelCase__ : int = 'weight_g' elif "weight_v" in name: UpperCamelCase__ : Any = 'weight_v' elif "bias" in name: UpperCamelCase__ : Union[str, Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase__ : Any = 'weight' else: UpperCamelCase__ : Tuple = None set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) continue if not is_used: unused_weights.append(lowerCamelCase_) logger.warning(f'Unused weights: {unused_weights}') def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1] UpperCamelCase__ : List[Any] = name.split('.') UpperCamelCase__ : Any = int(items[0]) UpperCamelCase__ : int = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) UpperCamelCase__ : Tuple = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) UpperCamelCase__ : int = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) UpperCamelCase__ : Optional[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) UpperCamelCase__ : List[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') else: unused_weights.append(lowerCamelCase_) @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple: if config_path is not None: UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_) else: UpperCamelCase__ : int = UniSpeechConfig() if is_finetuned: if dict_path: UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase__ : List[Any] = target_dict.pad_index UpperCamelCase__ : Dict = target_dict.bos_index UpperCamelCase__ : Union[str, Any] = target_dict.eos_index UpperCamelCase__ : Tuple = len(target_dict.symbols) UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json') if not os.path.isdir(lowerCamelCase_): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_)) return os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_) UpperCamelCase__ : Optional[int] = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase__ : Any = 42 UpperCamelCase__ : List[str] = 43 with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle: json.dump(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer( lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , ) UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_) processor.save_pretrained(lowerCamelCase_) UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_) else: UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_) if is_finetuned: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path}) else: UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) UpperCamelCase__ : int = model[0].eval() recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) hf_unispeech.save_pretrained(lowerCamelCase_) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowerCAmelCase__ = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
6
0
'''simple docstring''' import argparse import copy def __UpperCAmelCase ( lowerCamelCase_) -> List[str]: UpperCamelCase__ : List[str] = {} with open(lowerCamelCase_) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCamelCase__ : Any = [] _list.append([line.split()[1], line.split()[2]]) UpperCamelCase__ : int = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]]) if line.split()[1] not in dict_of_neighbours: UpperCamelCase__ : Union[str, Any] = [] _list.append([line.split()[0], line.split()[2]]) UpperCamelCase__ : Optional[Any] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]]) return dict_of_neighbours def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[str]: with open(lowerCamelCase_) as f: UpperCamelCase__ : str = f.read(1) UpperCamelCase__ : Dict = start_node UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : Tuple = start_node UpperCamelCase__ : Optional[Any] = 0 while visiting not in first_solution: UpperCamelCase__ : Dict = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1]) < int(lowerCamelCase_) and k[0] not in first_solution: UpperCamelCase__ : Optional[int] = k[1] UpperCamelCase__ : Optional[Any] = k[0] first_solution.append(lowerCamelCase_) UpperCamelCase__ : Tuple = distance_of_first_solution + int(lowerCamelCase_) UpperCamelCase__ : Tuple = best_node first_solution.append(lowerCamelCase_) UpperCamelCase__ : List[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCamelCase__ : int = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 10_000 ) return first_solution, distance_of_first_solution def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase__ : Optional[Any] = [] for n in solution[1:-1]: UpperCamelCase__ : int = solution.index(lowerCamelCase_) for kn in solution[1:-1]: UpperCamelCase__ : List[Any] = solution.index(lowerCamelCase_) if n == kn: continue UpperCamelCase__ : Optional[int] = copy.deepcopy(lowerCamelCase_) UpperCamelCase__ : Dict = kn UpperCamelCase__ : Tuple = n UpperCamelCase__ : int = 0 for k in _tmp[:-1]: UpperCamelCase__ : Union[str, Any] = _tmp[_tmp.index(lowerCamelCase_) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCamelCase__ : Optional[int] = distance + int(i[1]) _tmp.append(lowerCamelCase_) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp) UpperCamelCase__ : Dict = len(neighborhood_of_solution[0]) - 1 neighborhood_of_solution.sort(key=lambda lowerCamelCase_: x[index_of_last_item_in_the_list]) return neighborhood_of_solution def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple: UpperCamelCase__ : Optional[Any] = 1 UpperCamelCase__ : Tuple = first_solution UpperCamelCase__ : Dict = [] UpperCamelCase__ : int = distance_of_first_solution UpperCamelCase__ : List[str] = solution while count <= iters: UpperCamelCase__ : List[str] = find_neighborhood(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : Any = neighborhood[index_of_best_solution] UpperCamelCase__ : Any = len(lowerCamelCase_) - 1 UpperCamelCase__ : int = False while not found: UpperCamelCase__ : Any = 0 while i < len(lowerCamelCase_): if best_solution[i] != solution[i]: UpperCamelCase__ : Optional[Any] = best_solution[i] UpperCamelCase__ : Union[str, Any] = solution[i] break UpperCamelCase__ : List[Any] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node]) UpperCamelCase__ : Union[str, Any] = True UpperCamelCase__ : Optional[int] = best_solution[:-1] UpperCamelCase__ : List[Any] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCamelCase__ : Optional[Any] = cost UpperCamelCase__ : Any = solution else: UpperCamelCase__ : List[str] = index_of_best_solution + 1 UpperCamelCase__ : List[str] = neighborhood[index_of_best_solution] if len(lowerCamelCase_) >= size: tabu_list.pop(0) UpperCamelCase__ : List[str] = count + 1 return best_solution_ever, best_cost def __UpperCAmelCase ( lowerCamelCase_=None) -> Dict: UpperCamelCase__ : List[str] = generate_neighbours(args.File) UpperCamelCase__ : Optional[Any] = generate_first_solution( args.File , lowerCamelCase_) UpperCamelCase__ : Union[str, Any] = tabu_search( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.Iterations , args.Size , ) print(f'Best solution: {best_sol}, with total distance: {best_cost}.') if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
714
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[str]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Union[str, Any] = 1 UpperCamelCase__ : Union[str, Any] = 3 UpperCamelCase__ : Dict = (32, 32) UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_) return image @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def __UpperCamelCase ( self : Any): torch.manual_seed(0) UpperCamelCase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def __UpperCamelCase ( self : str): torch.manual_seed(0) UpperCamelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[Any]): def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict): class __lowercase : def __init__( self : List[Any]): UpperCamelCase__ : Optional[Any] = torch.ones([0]) def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int): self.pixel_values.to(UpperCAmelCase_) return self return Out() return extract def __UpperCamelCase ( self : str): UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : Any = self.dummy_cond_unet UpperCamelCase__ : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , ) UpperCamelCase__ : List[str] = self.dummy_vae UpperCamelCase__ : str = self.dummy_text_encoder UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Tuple = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ : int = self.dummy_cond_unet UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Optional[int] = self.dummy_vae UpperCamelCase__ : Optional[int] = self.dummy_text_encoder UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # make sure here that pndm scheduler skips prk UpperCamelCase__ : Dict = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np') UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0] UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_) assert isinstance(pipe.scheduler , UpperCAmelCase_) assert pipe.safety_checker is None UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase_) UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU') def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Dict = self.dummy_cond_unet UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_) UpperCamelCase__ : Any = self.dummy_vae UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') # put models in fp16 UpperCamelCase__ : Any = unet.half() UpperCamelCase__ : Tuple = vae.half() UpperCamelCase__ : Optional[int] = bert.half() # make sure here that pndm scheduler skips prk UpperCamelCase__ : Optional[int] = StableDiffusionPipeline( unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , ) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger' UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) UpperCamelCase__ : Any = 4_003_660_346 UpperCamelCase__ : Any = 7 # without safety guidance (sld_guidance_scale = 0) UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : str = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # without safety guidance (strong configuration) UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Dict = output.images UpperCamelCase__ : str = image[0, -3:, -3:, -1] UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_) UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity' UpperCamelCase__ : Tuple = 2_734_971_755 UpperCamelCase__ : Tuple = 7 UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : int = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : int = output.images UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Tuple = output.images UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1] UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Any): UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5') UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : int = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) UpperCamelCase__ : Any = 1_044_355_234 UpperCamelCase__ : Optional[int] = 12 UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : str = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) UpperCamelCase__ : List[str] = output.images UpperCamelCase__ : Any = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7 UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_) UpperCamelCase__ : List[str] = sd_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61]) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
6
0
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowercase (__lowerCamelCase ): _lowerCamelCase = ['''image_processor''', '''tokenizer'''] _lowerCamelCase = '''LayoutLMv3ImageProcessor''' _lowerCamelCase = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''') def __init__( self : Any , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : Any): UpperCamelCase__ : str = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCAmelCase_ , ) UpperCamelCase__ : Dict = kwargs.pop('feature_extractor') UpperCamelCase__ : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(UpperCAmelCase_ , UpperCAmelCase_) def __call__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase_ : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase_ : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : List[str] , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.') if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.') # first, apply the image processor UpperCamelCase__ : Optional[Any] = self.image_processor(images=UpperCAmelCase_ , return_tensors=UpperCAmelCase_) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCAmelCase_ , UpperCAmelCase_): UpperCamelCase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) UpperCamelCase__ : int = features['words'] UpperCamelCase__ : int = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) # add pixel values UpperCamelCase__ : Any = features.pop('pixel_values') if return_overflowing_tokens is True: UpperCamelCase__ : List[Any] = self.get_overflowing_images(UpperCAmelCase_ , encoded_inputs['overflow_to_sample_mapping']) UpperCamelCase__ : Any = images return encoded_inputs def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str]): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image UpperCamelCase__ : List[str] = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(UpperCAmelCase_) != len(UpperCAmelCase_): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F' {len(UpperCAmelCase_)} and {len(UpperCAmelCase_)}') return images_with_overflow def __UpperCamelCase ( self : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any]): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_) @property def __UpperCamelCase ( self : Dict): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def __UpperCamelCase ( self : List[Any]): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase_ , ) return self.image_processor_class @property def __UpperCamelCase ( self : Any): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase_ , ) return self.image_processor
715
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowerCAmelCase__ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } lowerCAmelCase__ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __UpperCAmelCase ( ) -> Union[str, Any]: UpperCamelCase__ : Optional[Any] = ( list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1)) ) UpperCamelCase__ : List[Any] = bs[:] UpperCamelCase__ : Optional[int] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase_) cs.append(2**8 + n) n += 1 UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs] return dict(zip(lowerCamelCase_ , lowerCamelCase_)) def __UpperCAmelCase ( lowerCamelCase_) -> Tuple: UpperCamelCase__ : Any = set() UpperCamelCase__ : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char)) UpperCamelCase__ : str = char return pairs class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ): UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token super().__init__( errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , ) with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle: UpperCamelCase__ : Any = json.load(UpperCAmelCase_) UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()} UpperCamelCase__ : Any = errors # how to handle errors in decoding UpperCamelCase__ : Tuple = bytes_to_unicode() UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle: UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1] UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges] UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Dict = {} UpperCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def __UpperCamelCase ( self : Tuple): return len(self.encoder) def __UpperCamelCase ( self : Tuple): return dict(self.encoder , **self.added_tokens_encoder) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]): if token in self.cache: return self.cache[token] UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_) UpperCamelCase__ : int = get_pairs(UpperCAmelCase_) if not pairs: return token while True: UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf'))) if bigram not in self.bpe_ranks: break UpperCamelCase__, UpperCamelCase__ : Tuple = bigram UpperCamelCase__ : Dict = [] UpperCamelCase__ : Optional[int] = 0 while i < len(UpperCAmelCase_): try: UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) UpperCamelCase__ : Any = j if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_) UpperCamelCase__ : Dict = new_word if len(UpperCAmelCase_) == 1: break else: UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : List[Any] = word return word def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any): UpperCamelCase__ : Optional[Any] = [] for token in re.findall(self.pat , UpperCAmelCase_): UpperCamelCase__ : Optional[int] = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' ')) return bpe_tokens def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token)) def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]): return self.decoder.get(UpperCAmelCase_) def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int): UpperCamelCase__ : int = ''.join(UpperCAmelCase_) UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): if not os.path.isdir(UpperCAmelCase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return UpperCamelCase__ : str = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n') UpperCamelCase__ : str = 0 with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!') UpperCamelCase__ : List[Any] = token_index writer.write(' '.join(UpperCAmelCase_) + '\n') index += 1 return vocab_file, merge_file def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_)) + [1] return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1] def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : Any = [self.sep_token_id] UpperCamelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]): UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()): UpperCamelCase__ : str = ' ' + text return (text, kwargs) def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): return token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"): UpperCamelCase__ : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text) else: # Generated responses should contain them already. inputs.append(UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_) UpperCamelCase__ : int = self.encode(UpperCAmelCase_) if len(UpperCAmelCase_) > self.model_max_length: UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :] logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.') return input_ids
6
0
'''simple docstring''' def __UpperCAmelCase ( lowerCamelCase_) -> list: if any(not isinstance(lowerCamelCase_ , lowerCamelCase_) or x < 0 for x in sequence): raise TypeError('Sequence must be list of non-negative integers') for _ in range(len(lowerCamelCase_)): for i, (rod_upper, rod_lower) in enumerate(zip(lowerCamelCase_ , sequence[1:])): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
716
'''simple docstring''' import requests from bsa import BeautifulSoup def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str: UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser') UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_).find('span').text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
6
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowercase (metaclass=__lowerCamelCase ): _lowerCamelCase = ['''torch''', '''scipy'''] def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int): requires_backends(self , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]): requires_backends(cls , ['torch', 'scipy']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any): requires_backends(cls , ['torch', 'scipy'])
717
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __lowercase (unittest.TestCase ): @slow def __UpperCamelCase ( self : int): UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip') model.to(UpperCAmelCase_) from datasets import load_dataset UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo') UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB') UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_) UpperCamelCase__ : Tuple = outputs.logits UpperCamelCase__ : str = torch.Size((1, 16)) self.assertEqual(logits.shape , UpperCAmelCase_) UpperCamelCase__ : Tuple = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
6
0
'''simple docstring''' from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES lowerCAmelCase__ = 'tiny-wmt19-en-ru' # Build # borrowed from a test lowerCAmelCase__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] lowerCAmelCase__ = dict(zip(vocab, range(len(vocab)))) lowerCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(tmpdirname) lowerCAmelCase__ = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] lowerCAmelCase__ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] lowerCAmelCase__ = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) lowerCAmelCase__ = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) lowerCAmelCase__ = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) lowerCAmelCase__ = FSMTForConditionalGeneration(config) print(f'''num of params {tiny_model.num_parameters()}''') # Test lowerCAmelCase__ = tokenizer(['Making tiny model'], return_tensors='pt') lowerCAmelCase__ = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-ru
718
'''simple docstring''' import argparse import struct import unittest class __lowercase : def __init__( self : Tuple , UpperCAmelCase_ : bytes): UpperCamelCase__ : Dict = data # Initialize hash values UpperCamelCase__ : Any = [ 0X6A_09E_667, 0XBB_67A_E85, 0X3C_6EF_372, 0XA5_4FF_53A, 0X51_0E5_27F, 0X9B_056_88C, 0X1F_83D_9AB, 0X5B_E0C_D19, ] # Initialize round constants UpperCamelCase__ : List[Any] = [ 0X42_8A2_F98, 0X71_374_491, 0XB5_C0F_BCF, 0XE9_B5D_BA5, 0X39_56C_25B, 0X59_F11_1F1, 0X92_3F8_2A4, 0XAB_1C5_ED5, 0XD8_07A_A98, 0X12_835_B01, 0X24_318_5BE, 0X55_0C7_DC3, 0X72_BE5_D74, 0X80_DEB_1FE, 0X9B_DC0_6A7, 0XC1_9BF_174, 0XE4_9B6_9C1, 0XEF_BE4_786, 0X0F_C19_DC6, 0X24_0CA_1CC, 0X2D_E92_C6F, 0X4A_748_4AA, 0X5C_B0A_9DC, 0X76_F98_8DA, 0X98_3E5_152, 0XA8_31C_66D, 0XB0_032_7C8, 0XBF_597_FC7, 0XC6_E00_BF3, 0XD5_A79_147, 0X06_CA6_351, 0X14_292_967, 0X27_B70_A85, 0X2E_1B2_138, 0X4D_2C6_DFC, 0X53_380_D13, 0X65_0A7_354, 0X76_6A0_ABB, 0X81_C2C_92E, 0X92_722_C85, 0XA2_BFE_8A1, 0XA8_1A6_64B, 0XC2_4B8_B70, 0XC7_6C5_1A3, 0XD1_92E_819, 0XD6_990_624, 0XF4_0E3_585, 0X10_6AA_070, 0X19_A4C_116, 0X1E_376_C08, 0X27_487_74C, 0X34_B0B_CB5, 0X39_1C0_CB3, 0X4E_D8A_A4A, 0X5B_9CC_A4F, 0X68_2E6_FF3, 0X74_8F8_2EE, 0X78_A56_36F, 0X84_C87_814, 0X8C_C70_208, 0X90_BEF_FFA, 0XA4_506_CEB, 0XBE_F9A_3F7, 0XC6_717_8F2, ] UpperCamelCase__ : Tuple = self.preprocessing(self.data) self.final_hash() @staticmethod def __UpperCamelCase ( UpperCAmelCase_ : bytes): UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64)) UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8)) return data + padding + big_endian_integer def __UpperCamelCase ( self : Union[str, Any]): # Convert into blocks of 64 bytes UpperCamelCase__ : int = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data) , 64) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_)) # add 48 0-ed integers words += [0] * 48 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes for index in range(0 , 64): if index > 15: # modify the zero-ed indexes at the end of the array UpperCamelCase__ : Dict = ( self.ror(words[index - 15] , 7) ^ self.ror(words[index - 15] , 18) ^ (words[index - 15] >> 3) ) UpperCamelCase__ : Tuple = ( self.ror(words[index - 2] , 17) ^ self.ror(words[index - 2] , 19) ^ (words[index - 2] >> 10) ) UpperCamelCase__ : int = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X100_000_000 # Compression UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25) UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g) UpperCamelCase__ : List[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X100_000_000 UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22) UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c) UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000 UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = ( g, f, e, ((d + tempa) % 0X100_000_000), c, b, a, ((tempa + tempa) % 0X100_000_000), ) UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h] # Modify final values UpperCamelCase__ : Optional[Any] = [ ((element + mutated_hash_values[index]) % 0X100_000_000) for index, element in enumerate(self.hashes) ] UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes]) def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int): return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations) class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : int): import hashlib UpperCamelCase__ : str = bytes('Test String' , 'utf-8') self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest()) def __UpperCAmelCase ( ) -> None: import doctest doctest.testmod() UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file') UpperCamelCase__ : List[str] = parser.parse_args() UpperCamelCase__ : str = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb') as f: UpperCamelCase__ : Any = f.read() else: UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8') print(SHAaaa(lowerCamelCase_).hash) if __name__ == "__main__": main()
6
0
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class __lowercase : _lowerCamelCase = 42 _lowerCamelCase = None _lowerCamelCase = None lowerCAmelCase__ = namedtuple('CoinsDistribResult', 'moves excess') def __UpperCAmelCase ( lowerCamelCase_) -> int: if root is None: return 0 # Validation def count_nodes(lowerCamelCase_) -> int: if node is None: return 0 return count_nodes(node.left) + count_nodes(node.right) + 1 def count_coins(lowerCamelCase_) -> int: if node is None: return 0 return count_coins(node.left) + count_coins(node.right) + node.data if count_nodes(lowerCamelCase_) != count_coins(lowerCamelCase_): raise ValueError('The nodes number should be same as the number of coins') # Main calculation def get_distrib(lowerCamelCase_) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1) UpperCamelCase__ : List[Any] = get_distrib(node.left) UpperCamelCase__ : Tuple = get_distrib(node.right) UpperCamelCase__ : Optional[Any] = 1 - left_distrib_excess UpperCamelCase__ : Optional[int] = 1 - right_distrib_excess UpperCamelCase__ : str = ( left_distrib_moves + right_distrib_moves + abs(lowerCamelCase_) + abs(lowerCamelCase_) ) UpperCamelCase__ : str = node.data - coins_to_left - coins_to_right return CoinsDistribResult(lowerCamelCase_ , lowerCamelCase_) return get_distrib(lowerCamelCase_)[0] if __name__ == "__main__": import doctest doctest.testmod()
719
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants lowerCAmelCase__ = 300 # TEMPERATURE (unit = K) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float: if donor_conc <= 0: raise ValueError('Donor concentration should be positive') elif acceptor_conc <= 0: raise ValueError('Acceptor concentration should be positive') elif intrinsic_conc <= 0: raise ValueError('Intrinsic concentration should be positive') elif donor_conc <= intrinsic_conc: raise ValueError( 'Donor concentration should be greater than intrinsic concentration') elif acceptor_conc <= intrinsic_conc: raise ValueError( 'Acceptor concentration should be greater than intrinsic concentration') else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
6
0
'''simple docstring''' import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore lowerCAmelCase__ = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" lowerCAmelCase__ = [file for file in filepaths if file != file.lower()] if upper_files: print(f'''{len(upper_files)} files contain uppercase characters:''') print('\n'.join(upper_files) + '\n') lowerCAmelCase__ = [file for file in filepaths if ' ' in file] if space_files: print(f'''{len(space_files)} files contain space characters:''') print('\n'.join(space_files) + '\n') lowerCAmelCase__ = [file for file in filepaths if '-' in file] if hyphen_files: print(f'''{len(hyphen_files)} files contain hyphen characters:''') print('\n'.join(hyphen_files) + '\n') lowerCAmelCase__ = [file for file in filepaths if os.sep not in file] if nodir_files: print(f'''{len(nodir_files)} files are not in a directory:''') print('\n'.join(nodir_files) + '\n') lowerCAmelCase__ = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
720
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]: UpperCamelCase__ : int = [] if isinstance(lowerCamelCase_ , lowerCamelCase_): for v in tree.values(): shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , (list, tuple)): for t in tree: shapes.extend(_fetch_dims(lowerCamelCase_)) elif isinstance(lowerCamelCase_ , torch.Tensor): shapes.append(tree.shape) else: raise ValueError('Not supported') return shapes @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]: UpperCamelCase__ : int = [] for d in reversed(lowerCamelCase_): idx.append(flat_idx % d) UpperCamelCase__ : Any = flat_idx // d return tuple(reversed(lowerCamelCase_)) @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]: # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(lowerCamelCase_) -> None: UpperCamelCase__ : Tuple = True for i in range(len(lowerCamelCase_)): UpperCamelCase__ : List[Any] = -1 * (i + 1) l[reversed_idx] &= tally UpperCamelCase__ : Optional[Any] = l[reversed_idx] if start_edges is None: UpperCamelCase__ : int = [s == 0 for s in start] reduce_edge_list(lowerCamelCase_) if end_edges is None: UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)] reduce_edge_list(lowerCamelCase_) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(lowerCamelCase_) == 0: return [()] elif len(lowerCamelCase_) == 1: return [(slice(start[0] , end[0] + 1),)] UpperCamelCase__ : List[Tuple[slice, ...]] = [] UpperCamelCase__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(lowerCamelCase_ , lowerCamelCase_): if s == e: path_list.append(slice(lowerCamelCase_ , s + 1)) else: break UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_) UpperCamelCase__ : Dict = len(lowerCamelCase_) # start == end, and we're done if divergence_idx == len(lowerCamelCase_): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : str = start[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , sdi + 1),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , )) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None UpperCamelCase__ : Optional[int] = end[divergence_idx] return tuple( path + (slice(lowerCamelCase_ , edi + 1),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , )) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),)) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),)) slices.extend(lower()) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper()) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),)) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper()) UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),)) slices.extend(lower()) return slices @torch.jit.ignore def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor: UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims] UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_)) # _get_minimal_slice_set is inclusive UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_)) # Get an ordered list of slices to perform UpperCamelCase__ : int = _get_minimal_slice_set( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) UpperCamelCase__ : List[Any] = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors]) def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any: if not (len(lowerCamelCase_) > 0): raise ValueError('Must provide at least one input') UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)] UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)]) def _prep_inputs(lowerCamelCase_) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims]) == no_batch_dims: UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:]) else: UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) return t UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_) UpperCamelCase__ : int = None if _out is not None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out) UpperCamelCase__ : Dict = 1 for d in orig_batch_dims: flat_batch_dim *= d UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(lowerCamelCase_) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : Optional[Any] = prepped_outputs for _ in range(lowerCamelCase_): # Chunk the input if not low_mem: UpperCamelCase__ : str = _select_chunk else: UpperCamelCase__ : List[Any] = partial( _chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , ) UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_) # Run the layer on the chunk UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_) # Allocate space for the output if out is None: UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_) # Put the chunk in its pre-allocated space if isinstance(lowerCamelCase_ , lowerCamelCase_): def assign(lowerCamelCase_ , lowerCamelCase_) -> None: for k, v in da.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_): assign(lowerCamelCase_ , da[k]) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: UpperCamelCase__ : List[str] = da[k] assign(lowerCamelCase_ , lowerCamelCase_) elif isinstance(lowerCamelCase_ , lowerCamelCase_): for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_): if _add_into_out: xa[i : i + chunk_size] += xa else: UpperCamelCase__ : int = xa elif isinstance(lowerCamelCase_ , torch.Tensor): if _add_into_out: out[i : i + chunk_size] += output_chunk else: UpperCamelCase__ : Dict = output_chunk else: raise ValueError('Not supported') i += chunk_size UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_) return out class __lowercase : def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ): UpperCamelCase__ : str = max_chunk_size UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : Optional[tuple] = None def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int): logging.info('Tuning chunk size...') if min_chunk_size >= self.max_chunk_size: return min_chunk_size UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)] UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size] UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(UpperCAmelCase_ : int) -> bool: try: with torch.no_grad(): fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_) return True except RuntimeError: return False UpperCamelCase__ : Tuple = 0 UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1 while i > min_viable_chunk_size_index: UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i]) if not viable: UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2 else: UpperCamelCase__ : Optional[int] = i UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2 return candidates[min_viable_chunk_size_index] def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable): UpperCamelCase__ : List[str] = True for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_): assert type(UpperCAmelCase_) == type(UpperCAmelCase_) if isinstance(UpperCAmelCase_ , (list, tuple)): consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_): UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])] consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_) else: consistent &= aa == aa return consistent def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ): UpperCamelCase__ : List[Any] = True UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data) == len(UpperCAmelCase_) UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_) else: # Otherwise, we can reuse the precomputed value UpperCamelCase__ : Optional[int] = False if not consistent: UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
6
0
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase__ = Lock() def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any: global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(lowerCamelCase_) process_lock.release() # receive your right neighbor's value process_lock.acquire() UpperCamelCase__ : int = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left UpperCamelCase__ : List[Any] = min(lowerCamelCase_ , lowerCamelCase_) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(lowerCamelCase_) process_lock.release() # receive your left neighbor's value process_lock.acquire() UpperCamelCase__ : int = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right UpperCamelCase__ : List[Any] = max(lowerCamelCase_ , lowerCamelCase_) # after all swaps are performed, send the values back to main result_pipe[1].send(lowerCamelCase_) def __UpperCAmelCase ( lowerCamelCase_) -> List[str]: UpperCamelCase__ : Tuple = [] UpperCamelCase__ : Optional[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe()) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop UpperCamelCase__ : Optional[int] = Pipe() UpperCamelCase__ : List[Any] = Pipe() process_array_.append( Process( target=lowerCamelCase_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , )) UpperCamelCase__ : Tuple = temp_rs UpperCamelCase__ : Tuple = temp_rr for i in range(1 , len(lowerCamelCase_) - 1): UpperCamelCase__ : int = Pipe() UpperCamelCase__ : Any = Pipe() process_array_.append( Process( target=lowerCamelCase_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , )) UpperCamelCase__ : List[Any] = temp_rs UpperCamelCase__ : int = temp_rr process_array_.append( Process( target=lowerCamelCase_ , args=( len(lowerCamelCase_) - 1, arr[len(lowerCamelCase_) - 1], temp_ls, None, temp_lr, None, result_pipe[len(lowerCamelCase_) - 1], ) , )) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(lowerCamelCase_)): UpperCamelCase__ : Union[str, Any] = result_pipe[p][0].recv() process_array_[p].join() return arr def __UpperCAmelCase ( ) -> List[str]: UpperCamelCase__ : Union[str, Any] = list(range(10 , 0 , -1)) print('Initial List') print(*lowerCamelCase_) UpperCamelCase__ : Optional[int] = odd_even_transposition(lowerCamelCase_) print('Sorted List\n') print(*lowerCamelCase_) if __name__ == "__main__": main()
721
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : int = tempfile.mkdtemp() # fmt: off UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'} UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(UpperCAmelCase_) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(UpperCAmelCase_)) UpperCamelCase__ : Dict = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]): return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def __UpperCamelCase ( self : str): shutil.rmtree(self.tmpdirname) def __UpperCamelCase ( self : Tuple): UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Union[str, Any] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer() UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_slow.save_pretrained(self.tmpdirname) UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_) UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) processor_fast.save_pretrained(self.tmpdirname) UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : List[str]): UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0) UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , UpperCAmelCase_) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , UpperCAmelCase_) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : int = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : int = self.prepare_image_inputs() UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np') UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2) def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Optional[Any] = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Any = 'lower newer' UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def __UpperCamelCase ( self : int): UpperCamelCase__ : Optional[int] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'lower newer' UpperCamelCase__ : List[Any] = self.prepare_image_inputs() UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_): processor() def __UpperCamelCase ( self : Dict): UpperCamelCase__ : Any = self.get_image_processor() UpperCamelCase__ : Dict = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_) UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def __UpperCamelCase ( self : str): UpperCamelCase__ : Union[str, Any] = self.get_image_processor() UpperCamelCase__ : List[str] = self.get_tokenizer() UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'lower newer' UpperCamelCase__ : Optional[int] = self.prepare_image_inputs() UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
6
0
'''simple docstring''' from __future__ import annotations def _UpperCAmelCase ( a : str , a : str ) -> bool: """simple docstring""" lowercase_ : Union[str, Any] = get_failure_array(a ) # 2) Step through text searching for pattern lowercase_ , lowercase_ : Dict = 0, 0 # index into text, pattern while i < len(a ): if pattern[j] == text[i]: if j == (len(a ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: lowercase_ : Optional[Any] = failure[j - 1] continue i += 1 return False def _UpperCAmelCase ( a : str ) -> list[int]: """simple docstring""" lowercase_ : int = [0] lowercase_ : List[Any] = 0 lowercase_ : Union[str, Any] = 1 while j < len(a ): if pattern[i] == pattern[j]: i += 1 elif i > 0: lowercase_ : Dict = failure[i - 1] continue j += 1 failure.append(a ) return failure if __name__ == "__main__": # Test 1) A: Optional[int] = "abc1abc12" A: Optional[int] = "alskfjaldsabc1abc1abc12k23adsfabcabc" A: List[Any] = "alskfjaldsk23adsfabcabc" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) A: List[Any] = "ABABX" A: List[Any] = "ABABZABABYABABX" assert kmp(pattern, text) # Test 3) A: Union[str, Any] = "AAAB" A: Union[str, Any] = "ABAAAAAB" assert kmp(pattern, text) # Test 4) A: Optional[int] = "abcdabcy" A: Union[str, Any] = "abcxabcdabxabcdabcdabcy" assert kmp(pattern, text) # Test 5) A: Tuple = "aabaabaaa" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
7
'''simple docstring''' def _UpperCAmelCase ( a : list[list[float]] ) -> list[list[float]]: """simple docstring""" lowercase_ : list[list[float]] = [] for data in source_data: for i, el in enumerate(a ): if len(a ) < i + 1: data_lists.append([] ) data_lists[i].append(float(a ) ) return data_lists def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]: """simple docstring""" lowercase_ : list[list[float]] = [] for dlist, weight in zip(a , a ): lowercase_ : Tuple = min(a ) lowercase_ : Any = max(a ) lowercase_ : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: lowercase_ : str = f"Invalid weight of {weight:f} provided" raise ValueError(a ) score_lists.append(a ) return score_lists def _UpperCAmelCase ( a : list[list[float]] ) -> list[float]: """simple docstring""" lowercase_ : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(a ): lowercase_ : List[Any] = final_scores[j] + ele return final_scores def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]: """simple docstring""" lowercase_ : int = get_data(a ) lowercase_ : Optional[int] = calculate_each_score(a , a ) lowercase_ : Dict = generate_final_scores(a ) # append scores to source data for i, ele in enumerate(a ): source_data[i].append(a ) return source_data
7
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A: List[str] = { "configuration_poolformer": [ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig", "PoolFormerOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: int = ["PoolFormerFeatureExtractor"] A: int = ["PoolFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: Optional[int] = [ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys A: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
7
'''simple docstring''' def _UpperCAmelCase ( a : int , a : int ) -> int: """simple docstring""" while second != 0: lowercase_ : Any = first & second first ^= second lowercase_ : List[str] = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() A: Union[str, Any] = int(input("Enter the first number: ").strip()) A: Union[str, Any] = int(input("Enter the second number: ").strip()) print(f"""{add(first, second) = }""")
7
1
'''simple docstring''' A: Dict = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)] def _UpperCAmelCase ( a : int ) -> int: """simple docstring""" lowercase_ : Any = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0] number //= 1_0_0_0_0_0 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution A: list[bool | None] = [None] * 1_0_0_0_0_0_0_0 A: Optional[Any] = True A: List[Any] = False def _UpperCAmelCase ( a : int ) -> bool: """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowercase_ : Union[str, Any] = chain(next_number(a ) ) lowercase_ : Any = number_chain while number < 1_0_0_0_0_0_0_0: lowercase_ : Dict = number_chain number *= 1_0 return number_chain def _UpperCAmelCase ( a : int = 1_0_0_0_0_0_0_0 ) -> int: """simple docstring""" for i in range(1 , a ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(a ) if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution() = }""")
7
'''simple docstring''' class __magic_name__ : """simple docstring""" def __init__( self , _lowercase ) -> Union[str, Any]: lowercase_ : Dict = n lowercase_ : Dict = [None] * self.n lowercase_ : Tuple = 0 # index of the first element lowercase_ : List[Any] = 0 lowercase_ : List[Any] = 0 def __len__( self ) -> int: return self.size def lowerCamelCase__ ( self ) -> bool: return self.size == 0 def lowerCamelCase__ ( self ) -> List[Any]: return False if self.is_empty() else self.array[self.front] def lowerCamelCase__ ( self , _lowercase ) -> Any: if self.size >= self.n: raise Exception('QUEUE IS FULL' ) lowercase_ : Tuple = data lowercase_ : List[Any] = (self.rear + 1) % self.n self.size += 1 return self def lowerCamelCase__ ( self ) -> Any: if self.size == 0: raise Exception('UNDERFLOW' ) lowercase_ : Dict = self.array[self.front] lowercase_ : Tuple = None lowercase_ : int = (self.front + 1) % self.n self.size -= 1 return temp
7
1
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def _UpperCAmelCase ( a : Optional[int] ) -> List[Any]: """simple docstring""" for param in module.parameters(): lowercase_ : Union[str, Any] = False def _UpperCAmelCase ( ) -> List[Any]: """simple docstring""" lowercase_ : Dict = 'cuda' if torch.cuda.is_available() else 'cpu' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase_ : Tuple = 'mps' if device == "mps": print( 'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch' ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues' ' with generations.' ) return device def _UpperCAmelCase ( a : List[str] ) -> List[str]: """simple docstring""" lowercase_ : str = plt.imshow(a ) fig.axes.get_xaxis().set_visible(a ) fig.axes.get_yaxis().set_visible(a ) plt.show() def _UpperCAmelCase ( ) -> Tuple: """simple docstring""" lowercase_ : Optional[int] = datetime.now() lowercase_ : Tuple = current_time.strftime('%H:%M:%S' ) return timestamp
7
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n" def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict: """simple docstring""" lowercase_ : List[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowercase_ : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple: """simple docstring""" lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) lowercase_ : int = np.array(pil_image.convert('RGB' ) ) lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1 lowercase_ : Any = np.transpose(a , [2, 0, 1] ) lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 ) return image class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]: super().__init__() self.register_modules( unet=_lowercase , scheduler=_lowercase , movq=_lowercase , ) lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int: # get the original timestep using init_timestep lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase ) lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 ) lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any: if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" ) lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase ) lowercase_ : Dict = batch_size * num_images_per_prompt if image.shape[1] == 4: lowercase_ : str = image else: if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(_lowercase , _lowercase ): lowercase_ : List[Any] = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase ) ] lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 ) else: lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase ) lowercase_ : str = self.movq.config.scaling_factor * init_latents lowercase_ : int = torch.cat([init_latents] , dim=0 ) lowercase_ : Dict = init_latents.shape lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase ) # get latents lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase ) lowercase_ : Optional[Any] = init_latents return latents def lowerCamelCase__ ( self , _lowercase=0 ) -> int: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" ) lowercase_ : Optional[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_lowercase , _lowercase ) def lowerCamelCase__ ( self , _lowercase=0 ) -> int: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_lowercase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowercase_ : Tuple = None for cpu_offloaded_model in [self.unet, self.movq]: lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase ) # We'll offload the last model manually. lowercase_ : List[str] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase__ ( self ) -> List[str]: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_lowercase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_lowercase ) def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str: lowercase_ : List[Any] = self._execution_device lowercase_ : List[Any] = guidance_scale > 1.0 if isinstance(_lowercase , _lowercase ): lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 ) lowercase_ : Optional[Any] = image_embeds.shape[0] if isinstance(_lowercase , _lowercase ): lowercase_ : List[str] = torch.cat(_lowercase , dim=0 ) if do_classifier_free_guidance: lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 ) lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 ) lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase ) if not isinstance(_lowercase , _lowercase ): lowercase_ : Union[str, Any] = [image] if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" ) lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 ) lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase ) lowercase_ : Dict = self.movq.encode(_lowercase )['latents'] lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 ) self.scheduler.set_timesteps(_lowercase , device=_lowercase ) lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase ) lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt ) lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor ) lowercase_ : List[str] = self.prepare_latents( _lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase ) for i, t in enumerate(self.progress_bar(_lowercase ) ): # expand the latents if we are doing classifier free guidance lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase_ : str = {'image_embeds': image_embeds} lowercase_ : str = self.unet( sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0] if do_classifier_free_guidance: lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 ) lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 ) lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 ) lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowercase_ : Dict = self.scheduler.step( _lowercase , _lowercase , _lowercase , generator=_lowercase , )[0] # post-processing lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: lowercase_ : Dict = image * 0.5 + 0.5 lowercase_ : Dict = image.clamp(0 , 1 ) lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowercase_ : int = self.numpy_to_pil(_lowercase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_lowercase )
7
1
'''simple docstring''' def _UpperCAmelCase ( a : int , a : int ) -> int: """simple docstring""" while a != 0: lowercase_ , lowercase_ : int = b % a, a return b def _UpperCAmelCase ( a : int , a : int ) -> int: """simple docstring""" if gcd(a , a ) != 1: lowercase_ : List[Any] = f"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(a ) lowercase_ , lowercase_ , lowercase_ : int = 1, 0, a lowercase_ , lowercase_ , lowercase_ : Tuple = 0, 1, m while va != 0: lowercase_ : List[str] = ua // va lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
7
'''simple docstring''' from typing import TYPE_CHECKING from ..utils import _LazyModule A: int = { "config": [ "EXTERNAL_DATA_FORMAT_SIZE_LIMIT", "OnnxConfig", "OnnxConfigWithPast", "OnnxSeq2SeqConfigWithPast", "PatchingSpec", ], "convert": ["export", "validate_model_outputs"], "features": ["FeaturesManager"], "utils": ["ParameterFormat", "compute_serialized_parameters_size"], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
7
1
'''simple docstring''' from math import sqrt def _UpperCAmelCase ( a : int ) -> int: """simple docstring""" lowercase_ : List[str] = 0 for i in range(1 , int(sqrt(a ) + 1 ) ): if n % i == 0 and i != sqrt(a ): total += i + n // i elif i == sqrt(a ): total += i return total - n def _UpperCAmelCase ( a : int = 1_0_0_0_0 ) -> int: """simple docstring""" lowercase_ : Dict = sum( i for i in range(1 , a ) if sum_of_divisors(sum_of_divisors(a ) ) == i and sum_of_divisors(a ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
7
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: A: List[Any] = json.load(f) @require_torch class __magic_name__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self , _lowercase ) -> Tuple: return FSMTTokenizer.from_pretrained(_lowercase ) def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]: lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ] ) @slow def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]: # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}" lowercase_ : str = self.get_tokenizer(_lowercase ) lowercase_ : Any = self.get_model(_lowercase ) lowercase_ : Any = bleu_data[pair]['src'] lowercase_ : Any = bleu_data[pair]['tgt'] lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase ) lowercase_ : str = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowercase_ : Any = tokenizer.batch_decode( _lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase ) lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase ) print(_lowercase ) self.assertGreaterEqual(scores['bleu'] , _lowercase )
7
1
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = (KDPMaDiscreteScheduler,) SCREAMING_SNAKE_CASE_ : Dict = 1_0 def lowerCamelCase__ ( self , **_lowercase ) -> List[Any]: lowercase_ : str = { 'num_train_timesteps': 1100, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**_lowercase ) return config def lowerCamelCase__ ( self ) -> Dict: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowercase ) def lowerCamelCase__ ( self ) -> List[str]: for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase ) def lowerCamelCase__ ( self ) -> int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowercase ) def lowerCamelCase__ ( self ) -> Any: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowercase ) def lowerCamelCase__ ( self ) -> Any: lowercase_ : Dict = self.scheduler_classes[0] lowercase_ : int = self.get_scheduler_config(prediction_type='v_prediction' ) lowercase_ : Union[str, Any] = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps ) lowercase_ : Optional[int] = self.dummy_model() lowercase_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase_ : Union[str, Any] = sample.to(_lowercase ) for i, t in enumerate(scheduler.timesteps ): lowercase_ : Optional[int] = scheduler.scale_model_input(_lowercase , _lowercase ) lowercase_ : Tuple = model(_lowercase , _lowercase ) lowercase_ : Any = scheduler.step(_lowercase , _lowercase , _lowercase ) lowercase_ : str = output.prev_sample lowercase_ : Optional[Any] = torch.sum(torch.abs(_lowercase ) ) lowercase_ : List[Any] = torch.mean(torch.abs(_lowercase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2 assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2 assert abs(result_mean.item() - 0.00_02 ) < 1E-3 def lowerCamelCase__ ( self ) -> Optional[Any]: if torch_device == "mps": return lowercase_ : str = self.scheduler_classes[0] lowercase_ : Tuple = self.get_scheduler_config() lowercase_ : Tuple = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps ) lowercase_ : Optional[int] = self.dummy_model() lowercase_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma lowercase_ : str = sample.to(_lowercase ) for i, t in enumerate(scheduler.timesteps ): lowercase_ : Dict = scheduler.scale_model_input(_lowercase , _lowercase ) lowercase_ : Union[str, Any] = model(_lowercase , _lowercase ) lowercase_ : List[Any] = scheduler.step(_lowercase , _lowercase , _lowercase ) lowercase_ : List[Any] = output.prev_sample lowercase_ : Union[str, Any] = torch.sum(torch.abs(_lowercase ) ) lowercase_ : str = torch.mean(torch.abs(_lowercase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.41_25 ) < 1E-2 assert abs(result_mean.item() - 0.02_66 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1E-2 assert abs(result_mean.item() - 0.02_66 ) < 1E-3 def lowerCamelCase__ ( self ) -> Dict: if torch_device == "mps": return lowercase_ : str = self.scheduler_classes[0] lowercase_ : Dict = self.get_scheduler_config() lowercase_ : Optional[int] = scheduler_class(**_lowercase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowercase ) lowercase_ : Dict = self.dummy_model() lowercase_ : List[Any] = self.dummy_sample_deter.to(_lowercase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowercase_ : Tuple = scheduler.scale_model_input(_lowercase , _lowercase ) lowercase_ : Union[str, Any] = model(_lowercase , _lowercase ) lowercase_ : Dict = scheduler.step(_lowercase , _lowercase , _lowercase ) lowercase_ : Union[str, Any] = output.prev_sample lowercase_ : Any = torch.sum(torch.abs(_lowercase ) ) lowercase_ : Optional[Any] = torch.mean(torch.abs(_lowercase ) ) if str(_lowercase ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.41_25 ) < 1E-2 assert abs(result_mean.item() - 0.02_66 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1E-2 assert abs(result_mean.item() - 0.02_66 ) < 1E-3
7
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A: int = { "configuration_trajectory_transformer": [ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A: Union[str, Any] = [ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TrajectoryTransformerModel", "TrajectoryTransformerPreTrainedModel", "load_tf_weights_in_trajectory_transformer", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
7
1
'''simple docstring''' def _UpperCAmelCase ( a : int = 1_0_0 ) -> int: """simple docstring""" lowercase_ : Dict = n * (n + 1) * (2 * n + 1) / 6 lowercase_ : Tuple = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f"""{solution() = }""")
7
'''simple docstring''' def _UpperCAmelCase ( a : str ) -> str: """simple docstring""" lowercase_ : Dict = 0 # if input_string is "aba" than new_input_string become "a|b|a" lowercase_ : Dict = '' lowercase_ : Any = '' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(a ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring lowercase_ , lowercase_ : Dict = 0, 0 # length[i] shows the length of palindromic substring with center i lowercase_ : List[Any] = [1 for i in range(len(a ) )] # for each character in new_string find corresponding palindromic string lowercase_ : Dict = 0 for j in range(len(a ) ): lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(a ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 lowercase_ : int = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: lowercase_ : Tuple = j - k + 1 # noqa: E741 lowercase_ : Tuple = j + k - 1 # update max_length and start position if max_length < length[j]: lowercase_ : Tuple = length[j] lowercase_ : List[Any] = j # create that string lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
7
1
'''simple docstring''' import os import numpy import onnx def _UpperCAmelCase ( a : Optional[Any] , a : Any ) -> List[str]: """simple docstring""" lowercase_ : Optional[Any] = a.name lowercase_ : List[str] = b.name lowercase_ : int = '' lowercase_ : Dict = '' lowercase_ : Optional[Any] = a == b lowercase_ : Optional[Any] = name_a lowercase_ : Optional[Any] = name_b return res def _UpperCAmelCase ( a : Any , a : Optional[Any] , a : Any ) -> Union[str, Any]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(a , a ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , a , a ) _graph_replace_input_with(node_proto.attribute[1].g , a , a ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , a , a ) def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : Any ) -> Tuple: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(a , a , a ) def _UpperCAmelCase ( a : List[Any] , a : Dict , a : str ) -> List[Any]: """simple docstring""" lowercase_ : Tuple = list(model.graph.initializer ) lowercase_ : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowercase_ : List[str] = inits[i].name lowercase_ : Dict = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , a , a ) def _UpperCAmelCase ( a : List[Any] ) -> str: """simple docstring""" lowercase_ : List[Any] = os.path.dirname(a ) lowercase_ : List[str] = os.path.basename(a ) lowercase_ : Dict = onnx.load(os.path.join(a , a ) ) lowercase_ : Optional[int] = list(model.graph.initializer ) lowercase_ : Tuple = set() lowercase_ : int = {} lowercase_ : Optional[Any] = [] lowercase_ : Optional[Any] = 0 for i in range(len(a ) ): if i in dup_set: continue for j in range(i + 1 , len(a ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(a ) dup_set.add(a ) lowercase_ : Any = inits[j].data_type lowercase_ : Dict = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 1_1: mem_size *= 8 else: print('unexpected data type: ' , a ) total_reduced_size += mem_size lowercase_ : List[str] = inits[i].name lowercase_ : List[Any] = inits[j].name if name_i in dup_map: dup_map[name_i].append(a ) else: lowercase_ : str = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' ) lowercase_ : Optional[Any] = sorted(a ) _remove_dup_initializers_from_model(a , a , a ) lowercase_ : Dict = 'optimized_' + model_file_name lowercase_ : str = os.path.join(a , a ) onnx.save(a , a ) return new_model
7
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = '' SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]: super().__init__(self , **_lowercase ) lowercase_ : int = repo_info lowercase_ : List[Any] = token lowercase_ : Union[str, Any] = None def lowerCamelCase__ ( self ) -> Optional[Any]: if self.dir_cache is None: lowercase_ : Optional[Any] = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes lowercase_ : str = { 'name': hf_file.rfilename, 'size': None, 'type': 'file', } self.dir_cache.update( { str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict: if not isinstance(self.repo_info , _lowercase ): raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" ) lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha ) return fsspec.open( _lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open() def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple: self._get_dirs() lowercase_ : str = self._strip_protocol(_lowercase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_lowercase ) def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]: self._get_dirs() lowercase_ : List[str] = PurePosixPath(path.strip('/' ) ) lowercase_ : List[str] = {} for p, f in self.dir_cache.items(): lowercase_ : Tuple = PurePosixPath(p.strip('/' ) ) lowercase_ : Optional[int] = p.parent if root == path: lowercase_ : List[str] = f lowercase_ : List[str] = list(paths.values() ) if detail: return out else: return sorted(f['name'] for f in out )
7
1
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = IFPipeline SCREAMING_SNAKE_CASE_ : Any = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} SCREAMING_SNAKE_CASE_ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ : Dict = PipelineTesterMixin.required_optional_params - {'latents'} def lowerCamelCase__ ( self ) -> Any: return self._get_dummy_components() def lowerCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Optional[int]: if str(_lowercase ).startswith('mps' ): lowercase_ : List[Any] = torch.manual_seed(_lowercase ) else: lowercase_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) lowercase_ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def lowerCamelCase__ ( self ) -> str: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def lowerCamelCase__ ( self ) -> int: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowerCamelCase__ ( self ) -> List[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowerCamelCase__ ( self ) -> Union[str, Any]: self._test_save_load_local() def lowerCamelCase__ ( self ) -> Tuple: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowerCamelCase__ ( self ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self ) -> int: # if lowercase_ : List[Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa ) lowercase_ : Dict = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_lowercase , tokenizer=_lowercase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) lowercase_ , lowercase_ : Union[str, Any] = pipe_a.encode_prompt('anime turtle' , device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() lowercase_ : str = None lowercase_ : Optional[Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(_lowercase , _lowercase , _lowercase , _lowercase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img lowercase_ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components ) lowercase_ : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(_lowercase , _lowercase , _lowercase , _lowercase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting lowercase_ : int = IFInpaintingPipeline(**pipe_a.components ) lowercase_ : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(_lowercase , _lowercase , _lowercase , _lowercase ) def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() lowercase_ : int = torch.Generator(device='cpu' ).manual_seed(0 ) lowercase_ : Any = pipe_a( prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='np' , ) lowercase_ : int = output.images[0] assert image.shape == (64, 64, 3) lowercase_ : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 lowercase_ : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(_lowercase , _lowercase ) # pipeline 2 _start_torch_memory_measurement() lowercase_ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) lowercase_ : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase ) lowercase_ : Any = pipe_a( prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='np' , ) lowercase_ : Optional[int] = output.images[0] assert image.shape == (256, 256, 3) lowercase_ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(_lowercase , _lowercase ) def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any: # pipeline 1 _start_torch_memory_measurement() lowercase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase ) lowercase_ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) lowercase_ : List[Any] = pipe_a( prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='np' , ) lowercase_ : Any = output.images[0] assert image.shape == (64, 64, 3) lowercase_ : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowercase_ : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(_lowercase , _lowercase ) # pipeline 2 _start_torch_memory_measurement() lowercase_ : Any = torch.Generator(device='cpu' ).manual_seed(0 ) lowercase_ : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_lowercase ) lowercase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase ) lowercase_ : Any = pipe_a( prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='np' , ) lowercase_ : Dict = output.images[0] assert image.shape == (256, 256, 3) lowercase_ : int = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase_ : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(_lowercase , _lowercase ) def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() lowercase_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase ) lowercase_ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_lowercase ) lowercase_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) lowercase_ : List[str] = pipe_a( prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , num_inference_steps=2 , generator=_lowercase , output_type='np' , ) lowercase_ : List[str] = output.images[0] assert image.shape == (64, 64, 3) lowercase_ : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowercase_ : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(_lowercase , _lowercase ) # pipeline 2 _start_torch_memory_measurement() lowercase_ : Any = torch.Generator(device='cpu' ).manual_seed(0 ) lowercase_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowercase ) lowercase_ : str = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_lowercase ) lowercase_ : Tuple = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_lowercase ) lowercase_ : str = pipe_a( prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , image=_lowercase , mask_image=_lowercase , original_image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type='np' , ) lowercase_ : Any = output.images[0] assert image.shape == (256, 256, 3) lowercase_ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase_ : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) assert_mean_pixel_difference(_lowercase , _lowercase ) def _UpperCAmelCase ( ) -> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
7
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A: List[Any] = logging.get_logger(__name__) def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]: """simple docstring""" lowercase_ : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append( (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), ('vqa_classifier.1.bias', 'classifier.1.bias'), ('vqa_classifier.3.weight', 'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), ('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict: """simple docstring""" for i in range(config.num_hidden_layers ): lowercase_ : Optional[int] = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" ) lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowercase_ : Dict = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = in_proj_bias[: config.hidden_size] lowercase_ : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase_ : Tuple = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Dict = in_proj_bias[-config.hidden_size :] def _UpperCAmelCase ( a : List[str] ) -> Optional[int]: """simple docstring""" lowercase_ : Union[str, Any] = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(a , a ) def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase_ : List[Any] = dct.pop(a ) lowercase_ : Dict = val @torch.no_grad() def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a ) lowercase_ : int = False lowercase_ : Union[str, Any] = False lowercase_ : List[str] = False lowercase_ : str = False if "vqa" in checkpoint_url: lowercase_ : str = True lowercase_ : Optional[int] = 3_1_2_9 lowercase_ : Any = 'huggingface/label-files' lowercase_ : Optional[Any] = 'vqa2-id2label.json' lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) ) lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()} lowercase_ : List[Any] = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} lowercase_ : List[Any] = ViltForQuestionAnswering(a ) elif "nlvr" in checkpoint_url: lowercase_ : Dict = True lowercase_ : List[str] = 2 lowercase_ : Tuple = {0: 'False', 1: 'True'} lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()} lowercase_ : int = 3 lowercase_ : Any = ViltForImagesAndTextClassification(a ) elif "irtr" in checkpoint_url: lowercase_ : Union[str, Any] = True lowercase_ : Dict = ViltForImageAndTextRetrieval(a ) elif "mlm_itm" in checkpoint_url: lowercase_ : int = True lowercase_ : Tuple = ViltForMaskedLM(a ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict'] lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_q_k_v(a , a ) if mlm_model or irtr_model: lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(a , a ) # load state dict into HuggingFace model model.eval() if mlm_model: lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(a ) # Define processor lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 ) lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' ) lowercase_ : Any = ViltProcessor(a , a ) # Forward pass on example inputs (image + text) if nlvr_model: lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw ) lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw ) lowercase_ : Any = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' ) lowercase_ : List[str] = processor(a , a , return_tensors='pt' ) lowercase_ : Union[str, Any] = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw ) if mlm_model: lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].' else: lowercase_ : List[Any] = 'How many cats are there?' lowercase_ : List[Any] = processor(a , a , return_tensors='pt' ) lowercase_ : Optional[int] = model(**a ) # Verify outputs if mlm_model: lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] ) lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 ) # verify masked token prediction equals "cats" lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] ) lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] ) assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 ) # verify vqa prediction equals "2" lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: lowercase_ : Optional[Any] = torch.Size([1, 2] ) lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] ) assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) assert outputs.logits.shape == expected_shape Path(a ).mkdir(exist_ok=a ) print(f"Saving model and processor to {pytorch_dump_folder_path}" ) model.save_pretrained(a ) processor.save_pretrained(a ) if __name__ == "__main__": A: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) A: Union[str, Any] = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
7
1
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 A: Optional[int] = sys.version_info >= (3, 1_0) def _UpperCAmelCase ( a : Tuple=None , a : List[str]=None ) -> Dict: """simple docstring""" return field(default_factory=lambda: default , metadata=a ) @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : float SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : bool @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : int = 4_2 SCREAMING_SNAKE_CASE_ : str = field(default='toto', metadata={'help': 'help message'} ) @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : bool = False SCREAMING_SNAKE_CASE_ : bool = True SCREAMING_SNAKE_CASE_ : Optional[bool] = None class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = 'titi' SCREAMING_SNAKE_CASE_ : Any = 'toto' class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = 'titi' SCREAMING_SNAKE_CASE_ : Optional[int] = 'toto' SCREAMING_SNAKE_CASE_ : List[Any] = 4_2 @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : BasicEnum = "toto" def lowerCamelCase__ ( self ) -> Union[str, Any]: lowercase_ : str = BasicEnum(self.foo ) @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : MixedTypeEnum = "toto" def lowerCamelCase__ ( self ) -> str: lowercase_ : Dict = MixedTypeEnum(self.foo ) @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Optional[float] = field(default=UpperCAmelCase_, metadata={'help': 'help message'} ) SCREAMING_SNAKE_CASE_ : Optional[str] = None SCREAMING_SNAKE_CASE_ : Optional[List[str]] = list_field(default=[] ) SCREAMING_SNAKE_CASE_ : Optional[List[int]] = list_field(default=[] ) @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : List[int] = list_field(default=[] ) SCREAMING_SNAKE_CASE_ : List[int] = list_field(default=[1, 2, 3] ) SCREAMING_SNAKE_CASE_ : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) SCREAMING_SNAKE_CASE_ : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : List[int] = field() SCREAMING_SNAKE_CASE_ : str = field() SCREAMING_SNAKE_CASE_ : BasicEnum = field() def lowerCamelCase__ ( self ) -> Tuple: lowercase_ : Dict = BasicEnum(self.required_enum ) @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : "BasicEnum" = field() SCREAMING_SNAKE_CASE_ : "Optional[bool]" = None SCREAMING_SNAKE_CASE_ : "str" = field(default='toto', metadata={'help': 'help message'} ) SCREAMING_SNAKE_CASE_ : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : bool = False SCREAMING_SNAKE_CASE_ : bool = True SCREAMING_SNAKE_CASE_ : bool | None = None @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : int | None = None SCREAMING_SNAKE_CASE_ : float | None = field(default=UpperCAmelCase_, metadata={'help': 'help message'} ) SCREAMING_SNAKE_CASE_ : str | None = None SCREAMING_SNAKE_CASE_ : list[str] | None = list_field(default=[] ) SCREAMING_SNAKE_CASE_ : list[int] | None = list_field(default=[] ) class __magic_name__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowercase_ : List[str] = {k: v for k, v in vars(_lowercase ).items() if k != 'container'} lowercase_ : List[str] = {k: v for k, v in vars(_lowercase ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , _lowercase ) and yy.get('choices' , _lowercase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](_lowercase ) , yy['type'](_lowercase ) ) del xx["type"], yy["type"] self.assertEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> List[str]: lowercase_ : Optional[int] = HfArgumentParser(_lowercase ) lowercase_ : List[str] = argparse.ArgumentParser() expected.add_argument('--foo' , type=_lowercase , required=_lowercase ) expected.add_argument('--bar' , type=_lowercase , required=_lowercase ) expected.add_argument('--baz' , type=_lowercase , required=_lowercase ) expected.add_argument('--flag' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='?' ) self.argparsersEqual(_lowercase , _lowercase ) lowercase_ : List[Any] = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((lowercase_) , ) : Any = parser.parse_args_into_dataclasses(_lowercase , look_for_args_file=_lowercase ) self.assertFalse(example.flag ) def lowerCamelCase__ ( self ) -> List[Any]: lowercase_ : Union[str, Any] = HfArgumentParser(_lowercase ) lowercase_ : Union[str, Any] = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=_lowercase ) expected.add_argument('--baz' , default='toto' , type=_lowercase , help='help message' ) self.argparsersEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> str: lowercase_ : Any = argparse.ArgumentParser() expected.add_argument('--foo' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='?' ) expected.add_argument('--baz' , type=_lowercase , default=_lowercase , const=_lowercase , nargs='?' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=_lowercase , dest='baz' ) expected.add_argument('--opt' , type=_lowercase , default=_lowercase ) lowercase_ : Any = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowercase ) for dataclass_type in dataclass_types: lowercase_ : List[str] = HfArgumentParser(_lowercase ) self.argparsersEqual(_lowercase , _lowercase ) lowercase_ : Dict = parser.parse_args([] ) self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) ) lowercase_ : Any = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) ) lowercase_ : Optional[Any] = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) ) lowercase_ : Any = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) ) lowercase_ : str = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(_lowercase , Namespace(foo=_lowercase , baz=_lowercase , opt=_lowercase ) ) def lowerCamelCase__ ( self ) -> int: lowercase_ : Any = HfArgumentParser(_lowercase ) lowercase_ : Union[str, Any] = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(_lowercase , _lowercase ) lowercase_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowercase_ : List[str] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) lowercase_ : Any = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowercase_ : List[str] = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowercase_ : Optional[Any] = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) lowercase_ : Any = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCamelCase__ ( self ) -> int: @dataclass class __magic_name__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : Literal["titi", "toto", 4_2] = "toto" lowercase_ : Tuple = HfArgumentParser(_lowercase ) lowercase_ : Dict = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(_lowercase , _lowercase ) lowercase_ : Any = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowercase_ : Dict = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowercase_ : int = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) def lowerCamelCase__ ( self ) -> str: lowercase_ : Union[str, Any] = HfArgumentParser(_lowercase ) lowercase_ : Optional[int] = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_lowercase ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_lowercase ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_lowercase ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_lowercase ) self.argparsersEqual(_lowercase , _lowercase ) lowercase_ : int = parser.parse_args([] ) self.assertEqual( _lowercase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) lowercase_ : Any = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(_lowercase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def lowerCamelCase__ ( self ) -> int: lowercase_ : Union[str, Any] = argparse.ArgumentParser() expected.add_argument('--foo' , default=_lowercase , type=_lowercase ) expected.add_argument('--bar' , default=_lowercase , type=_lowercase , help='help message' ) expected.add_argument('--baz' , default=_lowercase , type=_lowercase ) expected.add_argument('--ces' , nargs='+' , default=[] , type=_lowercase ) expected.add_argument('--des' , nargs='+' , default=[] , type=_lowercase ) lowercase_ : Any = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowercase ) for dataclass_type in dataclass_types: lowercase_ : List[Any] = HfArgumentParser(_lowercase ) self.argparsersEqual(_lowercase , _lowercase ) lowercase_ : Any = parser.parse_args([] ) self.assertEqual(_lowercase , Namespace(foo=_lowercase , bar=_lowercase , baz=_lowercase , ces=[] , des=[] ) ) lowercase_ : Optional[Any] = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(_lowercase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def lowerCamelCase__ ( self ) -> List[str]: lowercase_ : List[str] = HfArgumentParser(_lowercase ) lowercase_ : Dict = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=_lowercase , required=_lowercase ) expected.add_argument('--required_str' , type=_lowercase , required=_lowercase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_lowercase , ) self.argparsersEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> Union[str, Any]: lowercase_ : Tuple = HfArgumentParser(_lowercase ) lowercase_ : List[Any] = argparse.ArgumentParser() expected.add_argument('--foo' , type=_lowercase , required=_lowercase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_lowercase , ) expected.add_argument('--opt' , type=_lowercase , default=_lowercase ) expected.add_argument('--baz' , default='toto' , type=_lowercase , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_lowercase ) self.argparsersEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> Union[str, Any]: lowercase_ : Optional[int] = HfArgumentParser(_lowercase ) lowercase_ : Dict = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } lowercase_ : List[str] = parser.parse_dict(_lowercase )[0] lowercase_ : List[Any] = BasicExample(**_lowercase ) self.assertEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> Dict: lowercase_ : Dict = HfArgumentParser(_lowercase ) lowercase_ : Optional[int] = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 42, } self.assertRaises(_lowercase , parser.parse_dict , _lowercase , allow_extra_keys=_lowercase ) def lowerCamelCase__ ( self ) -> Union[str, Any]: lowercase_ : Dict = HfArgumentParser(_lowercase ) lowercase_ : List[str] = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = os.path.join(_lowercase , 'temp_json' ) os.mkdir(_lowercase ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(_lowercase , _lowercase ) lowercase_ : int = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] lowercase_ : List[str] = BasicExample(**_lowercase ) self.assertEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> Tuple: lowercase_ : List[str] = HfArgumentParser(_lowercase ) lowercase_ : List[Any] = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : str = os.path.join(_lowercase , 'temp_yaml' ) os.mkdir(_lowercase ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(_lowercase , _lowercase ) lowercase_ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] lowercase_ : Optional[Any] = BasicExample(**_lowercase ) self.assertEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> List[str]: lowercase_ : str = HfArgumentParser(_lowercase ) self.assertIsNotNone(_lowercase )
7
'''simple docstring''' def _UpperCAmelCase ( a : list ) -> list: """simple docstring""" for i in range(len(a ) - 1 , 0 , -1 ): lowercase_ : Any = False for j in range(a , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j] lowercase_ : int = True for j in range(a ): if unsorted[j] > unsorted[j + 1]: lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j] lowercase_ : Optional[Any] = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip() A: Tuple = [int(item) for item in user_input.split(",")] print(f"""{cocktail_shaker_sort(unsorted) = }""")
7
1
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _UpperCAmelCase ( a : Any , a : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase_ : Any = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' lowercase_ : int = Image.open(requests.get(a , stream=a ).raw ).convert('RGB' ) lowercase_ : int = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ), ] ) lowercase_ : List[str] = transform(a ).unsqueeze(0 ).to(a ) return image def _UpperCAmelCase ( a : Tuple ) -> int: """simple docstring""" if "visual_encoder" in key: lowercase_ : str = re.sub('visual_encoder*' , 'vision_model.encoder' , a ) if "blocks" in key: lowercase_ : List[str] = re.sub(R'blocks' , 'layers' , a ) if "attn" in key: lowercase_ : List[str] = re.sub(R'attn' , 'self_attn' , a ) if "norm1" in key: lowercase_ : Tuple = re.sub(R'norm1' , 'layer_norm1' , a ) if "norm2" in key: lowercase_ : str = re.sub(R'norm2' , 'layer_norm2' , a ) if "encoder.norm" in key: lowercase_ : Union[str, Any] = re.sub(R'encoder.norm' , 'post_layernorm' , a ) if "encoder.patch_embed.proj" in key: lowercase_ : Tuple = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , a ) if "encoder.pos_embed" in key: lowercase_ : Tuple = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , a ) if "encoder.cls_token" in key: lowercase_ : List[str] = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , a ) if "self_attn" in key: lowercase_ : List[Any] = re.sub(R'self_attn.proj' , 'self_attn.projection' , a ) return key @torch.no_grad() def _UpperCAmelCase ( a : Union[str, Any] , a : str=None ) -> str: """simple docstring""" if config_path is not None: lowercase_ : List[Any] = BlipConfig.from_pretrained(a ) else: lowercase_ : Tuple = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) lowercase_ : Optional[int] = BlipForConditionalGeneration(a ).eval() lowercase_ : Optional[int] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' lowercase_ : int = blip_decoder(pretrained=a , image_size=3_8_4 , vit='base' ) lowercase_ : Optional[Any] = pt_model.eval() lowercase_ : List[str] = pt_model.state_dict() for key in modified_state_dict.copy(): lowercase_ : int = modified_state_dict.pop(a ) lowercase_ : Dict = rename_key(a ) lowercase_ : Dict = value hf_model.load_state_dict(a ) lowercase_ : Dict = 3_8_4 lowercase_ : List[Any] = load_demo_image(image_size=a , device='cpu' ) lowercase_ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' ) lowercase_ : Dict = tokenizer(['a picture of'] ).input_ids lowercase_ : List[str] = hf_model.generate(a , a ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] lowercase_ : Any = hf_model.generate(a ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(a ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' lowercase_ : List[Any] = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) lowercase_ : Any = blip_vqa(pretrained=a , image_size=a , vit='base' ) vqa_model.eval() lowercase_ : str = vqa_model.state_dict() for key in modified_state_dict.copy(): lowercase_ : Any = modified_state_dict.pop(a ) lowercase_ : Tuple = rename_key(a ) lowercase_ : List[str] = value lowercase_ : Tuple = BlipForQuestionAnswering(a ) hf_vqa_model.load_state_dict(a ) lowercase_ : int = ['How many dogs are in this image?'] lowercase_ : List[str] = tokenizer(a , return_tensors='pt' ).input_ids lowercase_ : Optional[Any] = hf_vqa_model.generate(a , a ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) lowercase_ : List[str] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' lowercase_ : Any = blip_itm(pretrained=a , image_size=a , vit='base' ) itm_model.eval() lowercase_ : List[Any] = itm_model.state_dict() for key in modified_state_dict.copy(): lowercase_ : Optional[Any] = modified_state_dict.pop(a ) lowercase_ : str = rename_key(a ) lowercase_ : Any = value lowercase_ : List[Any] = BlipForImageTextRetrieval(a ) lowercase_ : Union[str, Any] = ['A picture of a woman with a dog sitting in a beach'] lowercase_ : Any = tokenizer( a , return_tensors='pt' , padding='max_length' , truncation=a , max_length=3_5 , ).input_ids hf_itm_model.load_state_dict(a ) hf_itm_model.eval() lowercase_ : int = hf_itm_model(a , a , use_itm_head=a ) lowercase_ : str = hf_itm_model(a , a , use_itm_head=a ) assert out[0].item() == 0.21_10_68_74_94_27_79_54 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": A: str = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") A: Union[str, Any] = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
7
'''simple docstring''' from ..utils import DummyObject, requires_backends class __magic_name__ ( metaclass=UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq'] def __init__( self , *_lowercase , **_lowercase ) -> Dict: requires_backends(self , ['transformers', 'torch', 'note_seq'] ) @classmethod def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]: requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) @classmethod def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict: requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
7
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = RobertaTokenizer SCREAMING_SNAKE_CASE_ : Union[str, Any] = RobertaTokenizerFast SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : List[Any] = {'cls_token': '<s>'} def lowerCamelCase__ ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : List[str] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] lowercase_ : Dict = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) lowercase_ : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] lowercase_ : Optional[int] = {'unk_token': '<unk>'} lowercase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_lowercase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_lowercase ) ) def lowerCamelCase__ ( self , **_lowercase ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase ) def lowerCamelCase__ ( self , **_lowercase ) -> Any: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase ) def lowerCamelCase__ ( self , _lowercase ) -> List[str]: lowercase_ : List[str] = 'lower newer' lowercase_ : str = 'lower newer' return input_text, output_text def lowerCamelCase__ ( self ) -> Any: lowercase_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase_ : str = 'lower newer' lowercase_ : Dict = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] lowercase_ : Optional[int] = tokenizer.tokenize(_lowercase ) # , add_prefix_space=True) self.assertListEqual(_lowercase , _lowercase ) lowercase_ : Any = tokens + [tokenizer.unk_token] lowercase_ : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase ) def lowerCamelCase__ ( self ) -> str: lowercase_ : Dict = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_lowercase ) , [0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_lowercase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , ) @slow def lowerCamelCase__ ( self ) -> Optional[int]: lowercase_ : Any = self.tokenizer_class.from_pretrained('roberta-base' ) lowercase_ : Any = tokenizer.encode('sequence builders' , add_special_tokens=_lowercase ) lowercase_ : int = tokenizer.encode('multi-sequence build' , add_special_tokens=_lowercase ) lowercase_ : Dict = tokenizer.encode( 'sequence builders' , add_special_tokens=_lowercase , add_prefix_space=_lowercase ) lowercase_ : Any = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_lowercase , add_prefix_space=_lowercase ) lowercase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowercase ) lowercase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCamelCase__ ( self ) -> Dict: lowercase_ : Tuple = self.get_tokenizer() lowercase_ : int = 'Encode this sequence.' lowercase_ : int = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments lowercase_ : Optional[int] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase , add_prefix_space=_lowercase ) lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_lowercase , _lowercase ) lowercase_ : List[str] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase , add_prefix_space=_lowercase ) lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_lowercase , _lowercase ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) lowercase_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) lowercase_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_lowercase , _lowercase ) # Testing spaces after special tokens lowercase_ : List[str] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase )} ) # mask token has a left space lowercase_ : List[str] = tokenizer.convert_tokens_to_ids(_lowercase ) lowercase_ : Optional[Any] = 'Encode <mask> sequence' lowercase_ : str = 'Encode <mask>sequence' lowercase_ : int = tokenizer.encode(_lowercase ) lowercase_ : Optional[int] = encoded.index(_lowercase ) lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_lowercase , _lowercase ) lowercase_ : Tuple = tokenizer.encode(_lowercase ) lowercase_ : Optional[Any] = encoded.index(_lowercase ) lowercase_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_lowercase , _lowercase ) def lowerCamelCase__ ( self ) -> Dict: pass def lowerCamelCase__ ( self ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowercase_ : str = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase ) lowercase_ : Optional[Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase ) lowercase_ : Any = 'A, <mask> AllenNLP sentence.' lowercase_ : Tuple = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase ) lowercase_ : List[str] = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) lowercase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) lowercase_ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( _lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def lowerCamelCase__ ( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : List[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowercase_ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _lowercase ) self.assertEqual(post_processor_state['add_prefix_space'] , _lowercase ) self.assertEqual(post_processor_state['trim_offsets'] , _lowercase ) def lowerCamelCase__ ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowercase_ : Optional[Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` lowercase_ : List[str] = f"{text_of_1_token} {text_of_1_token}" lowercase_ : List[Any] = self.rust_tokenizer_class.from_pretrained( _lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : Optional[Any] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , ) lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( _lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : Optional[Any] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , ) lowercase_ : Any = self.rust_tokenizer_class.from_pretrained( _lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : Dict = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowercase ), len(_lowercase ) + 1 + len(_lowercase )) , ) lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( _lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : str = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowercase ), len(_lowercase ) + 1 + len(_lowercase )) , ) lowercase_ : str = f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained( _lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : Dict = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_lowercase ) + 1, 1 + len(_lowercase ) + 1 + len(_lowercase )) , ) lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained( _lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : List[str] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_lowercase ), 1 + len(_lowercase ) + 1 + len(_lowercase )) , ) lowercase_ : str = self.rust_tokenizer_class.from_pretrained( _lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase ) lowercase_ : Any = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_lowercase ), 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
7
'''simple docstring''' def _UpperCAmelCase ( a : str , a : str ) -> float: """simple docstring""" def get_matched_characters(a : str , a : str ) -> str: lowercase_ : Union[str, Any] = [] lowercase_ : Tuple = min(len(_stra ) , len(_stra ) ) // 2 for i, l in enumerate(_stra ): lowercase_ : Optional[int] = int(max(0 , i - limit ) ) lowercase_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) ) if l in _stra[left:right]: matched.append(a ) lowercase_ : Union[str, Any] = f"{_stra[0:_stra.index(a )]} {_stra[_stra.index(a ) + 1:]}" return "".join(a ) # matching characters lowercase_ : Union[str, Any] = get_matched_characters(a , a ) lowercase_ : Optional[Any] = get_matched_characters(a , a ) lowercase_ : Optional[int] = len(a ) # transposition lowercase_ : Dict = ( len([(ca, ca) for ca, ca in zip(a , a ) if ca != ca] ) // 2 ) if not match_count: lowercase_ : List[str] = 0.0 else: lowercase_ : Any = ( 1 / 3 * ( match_count / len(a ) + match_count / len(a ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters lowercase_ : Optional[Any] = 0 for ca, ca in zip(stra[:4] , stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler("hello", "world"))
7
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging A: Tuple = logging.get_logger(__name__) A: List[Any] = { "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = 'mctct' def __init__( self , _lowercase=8065 , _lowercase=1536 , _lowercase=36 , _lowercase=6144 , _lowercase=4 , _lowercase=384 , _lowercase=920 , _lowercase=1E-5 , _lowercase=0.3 , _lowercase="relu" , _lowercase=0.02 , _lowercase=0.3 , _lowercase=0.3 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=1 , _lowercase=0.3 , _lowercase=1 , _lowercase=(7,) , _lowercase=(3,) , _lowercase=80 , _lowercase=1 , _lowercase=None , _lowercase="sum" , _lowercase=False , **_lowercase , ) -> Optional[Any]: super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase ) lowercase_ : Union[str, Any] = vocab_size lowercase_ : List[Any] = hidden_size lowercase_ : Union[str, Any] = num_hidden_layers lowercase_ : str = intermediate_size lowercase_ : List[Any] = num_attention_heads lowercase_ : Optional[int] = attention_head_dim lowercase_ : Optional[Any] = max_position_embeddings lowercase_ : Optional[int] = layer_norm_eps lowercase_ : List[Any] = layerdrop lowercase_ : str = hidden_act lowercase_ : List[str] = initializer_range lowercase_ : List[Any] = hidden_dropout_prob lowercase_ : int = attention_probs_dropout_prob lowercase_ : Dict = pad_token_id lowercase_ : Dict = bos_token_id lowercase_ : Dict = eos_token_id lowercase_ : Dict = conv_glu_dim lowercase_ : int = conv_dropout lowercase_ : List[Any] = num_conv_layers lowercase_ : List[Any] = input_feat_per_channel lowercase_ : List[Any] = input_channels lowercase_ : List[Any] = conv_channels lowercase_ : List[Any] = ctc_loss_reduction lowercase_ : List[Any] = ctc_zero_infinity # prevents config testing fail with exporting to json lowercase_ : Optional[Any] = list(_lowercase ) lowercase_ : Optional[int] = list(_lowercase ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ' f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, " f"`config.num_conv_layers = {self.num_conv_layers}`." )
7
'''simple docstring''' from __future__ import annotations def _UpperCAmelCase ( a : int = 4 ) -> list[list[int]]: """simple docstring""" lowercase_ : Tuple = abs(a ) or 4 return [[1 + x + y * row_size for x in range(a )] for y in range(a )] def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]: """simple docstring""" return reverse_row(transpose(a ) ) # OR.. transpose(reverse_column(matrix)) def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]: """simple docstring""" return reverse_row(reverse_column(a ) ) # OR.. reverse_column(reverse_row(matrix)) def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]: """simple docstring""" return reverse_column(transpose(a ) ) # OR.. transpose(reverse_row(matrix)) def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]: """simple docstring""" lowercase_ : Any = [list(a ) for x in zip(*a )] return matrix def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]: """simple docstring""" lowercase_ : List[str] = matrix[::-1] return matrix def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]: """simple docstring""" lowercase_ : str = [x[::-1] for x in matrix] return matrix def _UpperCAmelCase ( a : list[list[int]] ) -> None: """simple docstring""" for i in matrix: print(*a ) if __name__ == "__main__": A: Dict = make_matrix() print("\norigin:\n") print_matrix(matrix) print("\nrotate 90 counterclockwise:\n") print_matrix(rotate_aa(matrix)) A: List[Any] = make_matrix() print("\norigin:\n") print_matrix(matrix) print("\nrotate 180:\n") print_matrix(rotate_aaa(matrix)) A: List[str] = make_matrix() print("\norigin:\n") print_matrix(matrix) print("\nrotate 270 counterclockwise:\n") print_matrix(rotate_aaa(matrix))
7
1
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin A: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = XLMProphetNetTokenizer SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : Optional[int] = True def lowerCamelCase__ ( self ) -> Dict: super().setUp() # We have a SentencePiece fixture for testing lowercase_ : List[Any] = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ) -> Dict: lowercase_ : Dict = '[PAD]' lowercase_ : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase ) def lowerCamelCase__ ( self ) -> List[Any]: lowercase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '[PAD]' ) self.assertEqual(vocab_keys[1] , '[CLS]' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_lowercase ) , 1012 ) def lowerCamelCase__ ( self ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def lowerCamelCase__ ( self ) -> str: lowercase_ : List[str] = XLMProphetNetTokenizer(_lowercase , keep_accents=_lowercase ) lowercase_ : str = tokenizer.tokenize('This is a test' ) self.assertListEqual(_lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( _lowercase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase_ : int = tokenizer.convert_tokens_to_ids(_lowercase ) self.assertListEqual( _lowercase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase ) self.assertListEqual( _lowercase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] , ) @cached_property def lowerCamelCase__ ( self ) -> Any: return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def lowerCamelCase__ ( self ) -> Tuple: lowercase_ : List[Any] = 'Hello World!' lowercase_ : Any = [3_5389, 6672, 49, 2] self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) ) @slow def lowerCamelCase__ ( self ) -> Any: # fmt: off lowercase_ : int = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowercase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
7
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def _UpperCAmelCase ( a : Dict , a : Optional[int] , a : Tuple ) -> Optional[int]: """simple docstring""" lowercase_ : Any = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, oder?', } # BLUE scores as follows: # "pair": [fairseq, transformers] lowercase_ : List[str] = { 'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'], 'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'], 'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'], 'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'], } lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}" lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(a , exist_ok=a ) lowercase_ : int = os.path.join(a , 'README.md' ) print(f"Generating {path}" ) with open(a , 'w' , encoding='utf-8' ) as f: f.write(a ) # make sure we are under the root of the project A: List[str] = Path(__file__).resolve().parent.parent.parent A: List[str] = repo_dir / "model_cards" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: A , A , A: Any = model_name.split("-") A: int = model_cards_dir / "facebook" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
7
1
'''simple docstring''' def _UpperCAmelCase ( a : list ) -> list: """simple docstring""" for i in range(len(a ) - 1 , 0 , -1 ): lowercase_ : Any = False for j in range(a , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j] lowercase_ : int = True for j in range(a ): if unsorted[j] > unsorted[j + 1]: lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j] lowercase_ : Optional[Any] = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip() A: Tuple = [int(item) for item in user_input.split(",")] print(f"""{cocktail_shaker_sort(unsorted) = }""")
7
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) A: Tuple = logging.getLogger(__name__) def _UpperCAmelCase ( a : str ) -> List[Any]: """simple docstring""" lowercase_ : List[str] = git.Repo(search_parent_directories=a ) lowercase_ : Union[str, Any] = { 'repo_id': str(a ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f: json.dump(a , a , indent=4 ) def _UpperCAmelCase ( a : str ) -> Union[str, Any]: """simple docstring""" if params.n_gpu <= 0: lowercase_ : int = 0 lowercase_ : Union[str, Any] = -1 lowercase_ : List[str] = True lowercase_ : Optional[Any] = False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase_ : Dict = int(os.environ['WORLD_SIZE'] ) lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] ) lowercase_ : Optional[int] = int(os.environ['RANK'] ) # number of nodes / node ID lowercase_ : int = params.world_size // params.n_gpu_per_node lowercase_ : str = params.global_rank // params.n_gpu_per_node lowercase_ : Dict = True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase_ : str = 1 lowercase_ : Dict = 0 lowercase_ : Tuple = 0 lowercase_ : List[Any] = 0 lowercase_ : int = 1 lowercase_ : Tuple = 1 lowercase_ : str = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0 lowercase_ : Optional[Any] = params.n_nodes > 1 # summary lowercase_ : int = f"--- Global rank: {params.global_rank} - " logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://' , backend='nccl' , ) def _UpperCAmelCase ( a : Dict ) -> Optional[int]: """simple docstring""" np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
7
1
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True) os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True) os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True) def _UpperCAmelCase ( a : str ) -> Optional[int]: """simple docstring""" if hor == 1_2_8: lowercase_ : str = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') lowercase_ : str = (3_2, 1_2_8, 2_5_6) lowercase_ : Tuple = ('UpResnetBlock1D', 'UpResnetBlock1D') elif hor == 3_2: lowercase_ : Optional[int] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') lowercase_ : Tuple = (3_2, 6_4, 1_2_8, 2_5_6) lowercase_ : List[Any] = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D') lowercase_ : Any = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" ) lowercase_ : Optional[int] = model.state_dict() lowercase_ : Optional[int] = { 'down_block_types': down_block_types, 'block_out_channels': block_out_channels, 'up_block_types': up_block_types, 'layers_per_block': 1, 'use_timestep_embedding': True, 'out_block_type': 'OutConv1DBlock', 'norm_num_groups': 8, 'downsample_each_block': False, 'in_channels': 1_4, 'out_channels': 1_4, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'flip_sin_to_cos': False, 'freq_shift': 1, 'sample_size': 6_5_5_3_6, 'mid_block_type': 'MidResTemporalBlock1D', 'act_fn': 'mish', } lowercase_ : Any = UNetaDModel(**a ) print(f"length of state dict: {len(state_dict.keys() )}" ) print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" ) lowercase_ : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowercase_ : str = state_dict.pop(a ) hf_value_function.load_state_dict(a ) torch.save(hf_value_function.state_dict() , f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" ) with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json" , 'w' ) as f: json.dump(a , a ) def _UpperCAmelCase ( ) -> List[Any]: """simple docstring""" lowercase_ : str = { 'in_channels': 1_4, 'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'), 'up_block_types': (), 'out_block_type': 'ValueFunction', 'mid_block_type': 'ValueFunctionMidBlock1D', 'block_out_channels': (3_2, 6_4, 1_2_8, 2_5_6), 'layers_per_block': 1, 'downsample_each_block': True, 'sample_size': 6_5_5_3_6, 'out_channels': 1_4, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'use_timestep_embedding': True, 'flip_sin_to_cos': False, 'freq_shift': 1, 'norm_num_groups': 8, 'act_fn': 'mish', } lowercase_ : Tuple = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' ) lowercase_ : Union[str, Any] = model lowercase_ : int = UNetaDModel(**a ) print(f"length of state dict: {len(state_dict.keys() )}" ) print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" ) lowercase_ : Optional[Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): lowercase_ : int = state_dict.pop(a ) hf_value_function.load_state_dict(a ) torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' ) with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f: json.dump(a , a ) if __name__ == "__main__": unet(3_2) # unet(128) value_function()
7
'''simple docstring''' import os from distutils.util import strtobool def _UpperCAmelCase ( a : Any , a : int ) -> Any: """simple docstring""" for e in env_keys: lowercase_ : Optional[Any] = int(os.environ.get(a , -1 ) ) if val >= 0: return val return default def _UpperCAmelCase ( a : List[Any] , a : Dict=False ) -> Optional[Any]: """simple docstring""" lowercase_ : Optional[int] = os.environ.get(a , str(a ) ) return strtobool(a ) == 1 # As its name indicates `strtobool` actually returns an int... def _UpperCAmelCase ( a : List[Any] , a : Dict="no" ) -> str: """simple docstring""" lowercase_ : List[Any] = os.environ.get(a , str(a ) ) return value
7
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A: Dict = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 'maskformer-swin' SCREAMING_SNAKE_CASE_ : Dict = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , _lowercase=224 , _lowercase=4 , _lowercase=3 , _lowercase=96 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 12, 24] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=None , _lowercase=None , **_lowercase , ) -> str: super().__init__(**_lowercase ) lowercase_ : List[str] = image_size lowercase_ : Dict = patch_size lowercase_ : Union[str, Any] = num_channels lowercase_ : Tuple = embed_dim lowercase_ : int = depths lowercase_ : str = len(_lowercase ) lowercase_ : Optional[Any] = num_heads lowercase_ : Any = window_size lowercase_ : int = mlp_ratio lowercase_ : Dict = qkv_bias lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : List[str] = drop_path_rate lowercase_ : Any = hidden_act lowercase_ : Union[str, Any] = use_absolute_embeddings lowercase_ : Tuple = layer_norm_eps lowercase_ : str = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase_ : Optional[Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) ) lowercase_ : Tuple = ['stem'] + [f"stage{idx}" for idx in range(1 , len(_lowercase ) + 1 )] lowercase_ , lowercase_ : List[str] = get_aligned_output_features_output_indices( out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
7
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n" A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n" A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}" def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : Dict , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> str: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): lowercase_ : Union[str, Any] = new_id # turn into Numpy arrays lowercase_ : List[Any] = np.array(a ) lowercase_ : Optional[Any] = np.array(a ) if reduce_labels: lowercase_ : Any = 2_5_5 lowercase_ : Dict = label - 1 lowercase_ : List[Any] = 2_5_5 lowercase_ : Any = label != ignore_index lowercase_ : List[Any] = np.not_equal(a , a ) lowercase_ : Optional[int] = pred_label[mask] lowercase_ : Union[str, Any] = np.array(a )[mask] lowercase_ : Optional[int] = pred_label[pred_label == label] lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0] lowercase_ : Optional[int] = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0] lowercase_ : Dict = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0] lowercase_ : Optional[Any] = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def _UpperCAmelCase ( a : int , a : Optional[Any] , a : Optional[int] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Dict: """simple docstring""" lowercase_ : Dict = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ : List[str] = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ : List[Any] = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ : str = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(a , a ): lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = intersect_and_union( a , a , a , a , a , a ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def _UpperCAmelCase ( a : Optional[Any] , a : List[str] , a : Optional[Any] , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Optional[int]: """simple docstring""" lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = total_intersect_and_union( a , a , a , a , a , a ) # compute metrics lowercase_ : str = {} lowercase_ : str = total_area_intersect.sum() / total_area_label.sum() lowercase_ : Optional[Any] = total_area_intersect / total_area_union lowercase_ : List[Any] = total_area_intersect / total_area_label lowercase_ : Any = np.nanmean(a ) lowercase_ : Optional[Any] = np.nanmean(a ) lowercase_ : int = all_acc lowercase_ : Union[str, Any] = iou lowercase_ : Optional[Any] = acc if nan_to_num is not None: lowercase_ : Optional[int] = {metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), } ) , reference_urls=[ 'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> Tuple: lowercase_ : Optional[int] = mean_iou( results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , ) return iou_result
7
1
'''simple docstring''' import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _UpperCAmelCase ( a : List[str] , a : Optional[int]=7 ) -> Optional[int]: """simple docstring""" lowercase_ : str = None if token is not None: lowercase_ : Dict = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"} # The id of a workflow (not of a workflow run) lowercase_ : Optional[int] = '636036' lowercase_ : Union[str, Any] = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}" lowercase_ : Any = requests.get(a , headers=a ).json() return result["workflow_runs"] def _UpperCAmelCase ( a : List[Any] ) -> Optional[int]: """simple docstring""" lowercase_ : Optional[Any] = get_daily_ci_runs(a ) lowercase_ : Optional[int] = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowercase_ : Optional[Any] = workflow_run['id'] break return workflow_run_id def _UpperCAmelCase ( a : Optional[Any] , a : int , a : List[str] ) -> str: """simple docstring""" lowercase_ : Union[str, Any] = get_last_daily_ci_runs(a ) if workflow_run_id is not None: lowercase_ : List[str] = get_artifacts_links(worflow_run_id=a , token=a ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowercase_ : List[Any] = artifacts_links[artifact_name] download_artifact( artifact_name=a , artifact_url=a , output_dir=a , token=a ) def _UpperCAmelCase ( a : List[Any] , a : Optional[int] , a : str ) -> Dict: """simple docstring""" get_last_daily_ci_artifacts(a , a , a ) lowercase_ : Tuple = {} for artifact_name in artifact_names: lowercase_ : Tuple = os.path.join(a , f"{artifact_name}.zip" ) if os.path.isfile(a ): lowercase_ : str = {} with zipfile.ZipFile(a ) as z: for filename in z.namelist(): if not os.path.isdir(a ): # read the file with z.open(a ) as f: lowercase_ : Tuple = f.read().decode('UTF-8' ) return results
7
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A: Dict = logging.get_logger(__name__) A: Optional[Any] = { "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json", # See all ViT models at https://huggingface.co/models?filter=vit } class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit' def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]: super().__init__(**_lowercase ) lowercase_ : Optional[int] = hidden_size lowercase_ : str = num_hidden_layers lowercase_ : str = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Any = hidden_dropout_prob lowercase_ : List[str] = attention_probs_dropout_prob lowercase_ : str = initializer_range lowercase_ : List[str] = layer_norm_eps lowercase_ : Any = image_size lowercase_ : Tuple = patch_size lowercase_ : Optional[Any] = num_channels lowercase_ : str = qkv_bias lowercase_ : List[str] = encoder_stride class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' ) @property def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCamelCase__ ( self ) -> float: return 1E-4
7
1
'''simple docstring''' def _UpperCAmelCase ( a : str , a : int ) -> str: """simple docstring""" lowercase_ : list[list[str]] = [[] for _ in range(a )] lowercase_ : Dict = key - 1 if key <= 0: raise ValueError('Height of grid can\'t be 0 or negative' ) if key == 1 or len(a ) <= key: return input_string for position, character in enumerate(a ): lowercase_ : Dict = position % (lowest * 2) # puts it in bounds lowercase_ : int = min(a , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(a ) lowercase_ : List[str] = [''.join(a ) for row in temp_grid] lowercase_ : Optional[int] = ''.join(a ) return output_string def _UpperCAmelCase ( a : str , a : int ) -> str: """simple docstring""" lowercase_ : List[Any] = [] lowercase_ : List[str] = key - 1 if key <= 0: raise ValueError('Height of grid can\'t be 0 or negative' ) if key == 1: return input_string lowercase_ : list[list[str]] = [[] for _ in range(a )] # generates template for position in range(len(a ) ): lowercase_ : Tuple = position % (lowest * 2) # puts it in bounds lowercase_ : str = min(a , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append('*' ) lowercase_ : List[Any] = 0 for row in temp_grid: # fills in the characters lowercase_ : List[str] = input_string[counter : counter + len(a )] grid.append(list(a ) ) counter += len(a ) lowercase_ : Tuple = '' # reads as zigzag for position in range(len(a ) ): lowercase_ : int = position % (lowest * 2) # puts it in bounds lowercase_ : str = min(a , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def _UpperCAmelCase ( a : str ) -> dict[int, str]: """simple docstring""" lowercase_ : Tuple = {} for key_guess in range(1 , len(a ) ): # tries every key lowercase_ : str = decrypt(a , a ) return results if __name__ == "__main__": import doctest doctest.testmod()
7
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A: int = logging.get_logger(__name__) A: int = { "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json", } class __magic_name__ ( UpperCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode' SCREAMING_SNAKE_CASE_ : int = ['past_key_values'] SCREAMING_SNAKE_CASE_ : Any = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any: lowercase_ : Tuple = vocab_size lowercase_ : str = n_positions lowercase_ : List[str] = n_embd lowercase_ : str = n_layer lowercase_ : Optional[Any] = n_head lowercase_ : Optional[int] = n_inner lowercase_ : Union[str, Any] = activation_function lowercase_ : Dict = resid_pdrop lowercase_ : str = embd_pdrop lowercase_ : Optional[Any] = attn_pdrop lowercase_ : List[Any] = layer_norm_epsilon lowercase_ : Optional[int] = initializer_range lowercase_ : List[Any] = scale_attn_weights lowercase_ : Any = use_cache lowercase_ : List[str] = attention_softmax_in_fpaa lowercase_ : Any = scale_attention_softmax_in_fpaa lowercase_ : Optional[Any] = multi_query lowercase_ : Optional[Any] = bos_token_id lowercase_ : Optional[Any] = eos_token_id super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
7
1
'''simple docstring''' def _UpperCAmelCase ( a : int = 1 , a : int = 1_0_0_0 ) -> int: """simple docstring""" lowercase_ : Union[str, Any] = 1 lowercase_ : Any = 0 for divide_by_number in range(a , digit + 1 ): lowercase_ : list[int] = [] lowercase_ : Optional[int] = numerator for _ in range(1 , digit + 1 ): if now_divide in has_been_divided: if longest_list_length < len(a ): lowercase_ : Union[str, Any] = len(a ) lowercase_ : str = divide_by_number else: has_been_divided.append(a ) lowercase_ : Optional[Any] = now_divide * 1_0 % divide_by_number return the_digit # Tests if __name__ == "__main__": import doctest doctest.testmod()
7
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __magic_name__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self ) -> Optional[Any]: lowercase_ : Tuple = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(_lowercase ) ) def lowerCamelCase__ ( self ) -> Dict: lowercase_ : Tuple = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(_lowercase ) ) def lowerCamelCase__ ( self ) -> str: lowercase_ : int = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(_lowercase ) ) def lowerCamelCase__ ( self ) -> Any: lowercase_ : Dict = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] self.assertTrue(is_safetensors_compatible(_lowercase ) ) def lowerCamelCase__ ( self ) -> Optional[Any]: lowercase_ : List[Any] = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', # Removed: 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(_lowercase ) ) def lowerCamelCase__ ( self ) -> Optional[int]: lowercase_ : str = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] lowercase_ : Union[str, Any] = 'fp16' self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) ) def lowerCamelCase__ ( self ) -> int: lowercase_ : Optional[int] = [ 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] lowercase_ : Dict = 'fp16' self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) ) def lowerCamelCase__ ( self ) -> int: # pass variant but use the non-variant filenames lowercase_ : Optional[int] = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] lowercase_ : Dict = 'fp16' self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) ) def lowerCamelCase__ ( self ) -> Union[str, Any]: lowercase_ : int = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] lowercase_ : Dict = 'fp16' self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) ) def lowerCamelCase__ ( self ) -> int: lowercase_ : str = [ 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', ] lowercase_ : str = 'fp16' self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) ) def lowerCamelCase__ ( self ) -> List[str]: # pass variant but use the non-variant filenames lowercase_ : List[Any] = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] lowercase_ : Dict = 'fp16' self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) ) def lowerCamelCase__ ( self ) -> List[str]: lowercase_ : Union[str, Any] = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', # 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] lowercase_ : Dict = 'fp16' self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
7
1