Dataset columns:
- code: string (lengths 81 to 54k)
- code_codestyle: int64 (0 to 721)
- style_context: string (lengths 91 to 41.9k)
- style_context_codestyle: int64 (0 to 699)
- label: int64 (0 or 1)
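Each row below follows this schema in column order: a code snippet, its code_codestyle id, a style_context snippet, its style_context_codestyle id, and a 0/1 label. As a minimal sketch of how rows with these columns could be consumed — assuming they are exported locally as JSON Lines (the file name rows.jsonl and the storage format are assumptions, not given by this dump) — one could iterate them with the datasets library:

    # Sketch only: assumes the rows are stored as JSON Lines with the columns
    # listed above; "rows.jsonl" is a placeholder path, not from this dump.
    from datasets import load_dataset

    ds = load_dataset("json", data_files="rows.jsonl", split="train")

    for row in ds.select(range(3)):
        # Each row pairs a "code" snippet with a "style_context" snippet,
        # plus integer style ids and a binary label.
        print(len(row["code"]), row["code_codestyle"],
              row["style_context_codestyle"], row["label"])

The raw rows follow.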
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> bool: __SCREAMING_SNAKE_CASE = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _a ( UpperCAmelCase__ = 50_00 ) -> int: __SCREAMING_SNAKE_CASE = [(i * (3 * i - 1)) // 2 for i in range(1 , UpperCAmelCase__ )] for i, pentagonal_i in enumerate(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ , len(UpperCAmelCase__ ) ): __SCREAMING_SNAKE_CASE = pentagonal_nums[j] __SCREAMING_SNAKE_CASE = pentagonal_i + pentagonal_j __SCREAMING_SNAKE_CASE = pentagonal_j - pentagonal_i if is_pentagonal(UpperCAmelCase__ ) and is_pentagonal(UpperCAmelCase__ ): return b return -1 if __name__ == "__main__": print(F'''{solution() = }''')
code_codestyle: 717
"""simple docstring""" import math lowerCAmelCase__ =10 lowerCAmelCase__ =7 lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS def _a ( UpperCAmelCase__ = 20 ) -> str: __SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
style_context_codestyle: 690
label: 0
import string


def _a ( UpperCAmelCase__ ) -> None:
    for key in range(len(string.ascii_uppercase ) ):
        __SCREAMING_SNAKE_CASE = ''''''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                __SCREAMING_SNAKE_CASE = string.ascii_uppercase.find(UpperCAmelCase__ )
                __SCREAMING_SNAKE_CASE = num - key
                if num < 0:
                    __SCREAMING_SNAKE_CASE = num + len(string.ascii_uppercase )
                __SCREAMING_SNAKE_CASE = translated + string.ascii_uppercase[num]
            else:
                __SCREAMING_SNAKE_CASE = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""" )


def _a ( ) -> None:
    __SCREAMING_SNAKE_CASE = input('''Encrypted message: ''' )
    __SCREAMING_SNAKE_CASE = message.upper()
    decrypt(UpperCAmelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
code_codestyle: 718
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__( __magic_name__ ): def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] ) ] return result
style_context_codestyle: 690
label: 0
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> int: assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: __SCREAMING_SNAKE_CASE = f"""The input value of [n={number}] has to be > 0""" raise ValueError(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = sylvester(number - 1 ) __SCREAMING_SNAKE_CASE = num - 1 __SCREAMING_SNAKE_CASE = num return lower * upper + 1 if __name__ == "__main__": print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
code_codestyle: 719
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 690
label: 0
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =10 def _a ( UpperCAmelCase__ ) -> list[int]: __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ ) while placement <= max_digit: # declare and initialize empty buckets __SCREAMING_SNAKE_CASE = [[] for _ in range(UpperCAmelCase__ )] # split list_of_ints between the buckets for i in list_of_ints: __SCREAMING_SNAKE_CASE = int((i / placement) % RADIX ) buckets[tmp].append(UpperCAmelCase__ ) # put each buckets' contents into list_of_ints __SCREAMING_SNAKE_CASE = 0 for b in range(UpperCAmelCase__ ): for i in buckets[b]: __SCREAMING_SNAKE_CASE = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 720
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
style_context_codestyle: 690
label: 0
"""simple docstring""" from __future__ import annotations def _a ( UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None ) -> None: if start is None: __SCREAMING_SNAKE_CASE = 0 if end is None: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) - 1 if start >= end: return __SCREAMING_SNAKE_CASE = (start + end) // 2 slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ ) if sequence[end] < sequence[mid]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sequence[mid], sequence[end] slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 721
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
style_context_codestyle: 690
label: 0
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''pixel_values'''] def __init__( self : str , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_55 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **__SCREAMING_SNAKE_CASE : str , ) -> None: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 2_56} __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = resample __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) __SCREAMING_SNAKE_CASE = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE ) return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" ) return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any ) -> np.ndarray: """simple docstring""" return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> np.ndarray: """simple docstring""" return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : str , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[float] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : str , ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE = size if size is not None else self.size __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) __SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE = 
image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __SCREAMING_SNAKE_CASE = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: __SCREAMING_SNAKE_CASE = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images] __SCREAMING_SNAKE_CASE = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images] __SCREAMING_SNAKE_CASE = {'''pixel_values''': images} return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Tuple] = None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = target_sizes.numpy() __SCREAMING_SNAKE_CASE = [] for idx in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = logits.argmax(dim=1 ) __SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
code_codestyle: 700
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def _a ( UpperCAmelCase__ ) -> dict[str, str]: __SCREAMING_SNAKE_CASE = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __SCREAMING_SNAKE_CASE = remove_duplicates(key.upper() ) __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) # First fill cipher with key characters __SCREAMING_SNAKE_CASE = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(UpperCAmelCase__ ) , 26 ): __SCREAMING_SNAKE_CASE = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __SCREAMING_SNAKE_CASE = alphabet[i - offset] __SCREAMING_SNAKE_CASE = char return cipher_alphabet def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: return "".join(cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def _a ( ) -> None: __SCREAMING_SNAKE_CASE = input('''Enter message to encode or decode: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Enter keyword: ''' ).strip() __SCREAMING_SNAKE_CASE = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __SCREAMING_SNAKE_CASE = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __SCREAMING_SNAKE_CASE = create_cipher_map(UpperCAmelCase__ ) print(func(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
style_context_codestyle: 690
label: 0
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "vocab.json", "merges_file": "merges.txt"} lowerCAmelCase__ ={ "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } lowerCAmelCase__ ={ "allenai/longformer-base-4096": 4_096, "allenai/longformer-large-4096": 4_096, "allenai/longformer-large-4096-finetuned-triviaqa": 4_096, "allenai/longformer-base-4096-extra.pos.embd.only": 4_096, "allenai/longformer-large-4096-extra.pos.embd.only": 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _a ( ) -> List[Any]: __SCREAMING_SNAKE_CASE = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __SCREAMING_SNAKE_CASE = bs[:] __SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCAmelCase__ ) cs.append(2**8 + n ) n += 1 __SCREAMING_SNAKE_CASE = [chr(UpperCAmelCase__ ) for n in cs] return dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE = char return pairs class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="replace" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Dict="</s>" , __SCREAMING_SNAKE_CASE : Dict="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : str="<unk>" , __SCREAMING_SNAKE_CASE : Tuple="<pad>" , 
__SCREAMING_SNAKE_CASE : Union[str, Any]="<mask>" , __SCREAMING_SNAKE_CASE : str=False , **__SCREAMING_SNAKE_CASE : int , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else bos_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else eos_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else sep_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cls_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else unk_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle: __SCREAMING_SNAKE_CASE = json.load(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} __SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding __SCREAMING_SNAKE_CASE = bytes_to_unicode() __SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle: __SCREAMING_SNAKE_CASE = merges_handle.read().split('''\n''' )[1:-1] __SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] __SCREAMING_SNAKE_CASE = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __SCREAMING_SNAKE_CASE = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _a ( self : List[Any] ) -> Any: """simple docstring""" return len(self.encoder ) def _a ( self : Tuple ) -> Tuple: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple ) -> Dict: """simple docstring""" if token in self.cache: return self.cache[token] __SCREAMING_SNAKE_CASE = tuple(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = get_pairs(__SCREAMING_SNAKE_CASE ) if not pairs: return token while True: __SCREAMING_SNAKE_CASE 
= min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = bigram __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 while i < len(__SCREAMING_SNAKE_CASE ): try: __SCREAMING_SNAKE_CASE = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __SCREAMING_SNAKE_CASE = tuple(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = new_word if len(__SCREAMING_SNAKE_CASE ) == 1: break else: __SCREAMING_SNAKE_CASE = get_pairs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = ''' '''.join(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = word return word def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) ) return bpe_tokens def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: """simple docstring""" return self.decoder.get(__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = ''''''.join(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' ) __SCREAMING_SNAKE_CASE = 0 with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __SCREAMING_SNAKE_CASE = token_index writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' ) index += 1 return vocab_file, merge_file def _a ( self : Tuple , 
__SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] __SCREAMING_SNAKE_CASE = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , **__SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()): __SCREAMING_SNAKE_CASE = ''' ''' + text return (text, kwargs)
code_codestyle: 701
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__: def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]="relu" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = embeddings_size __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _a ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModel(config=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Optional[Any] ) -> Any: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCAmelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _a ( self : Dict ) -> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _a ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass def _a ( self : List[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , training=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __SCREAMING_SNAKE_CASE = self.model_tester.num_stages self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __SCREAMING_SNAKE_CASE = layer_type __SCREAMING_SNAKE_CASE = True 
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE = True check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]={} ): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ): if isinstance(__SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) check_equivalence(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} ) def _a ( self : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = TFRegNetModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def 
_a ( ) -> Dict: __SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A__( unittest.TestCase ): @cached_property def _a ( self : List[Any] ) -> str: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE ) # verify the logits __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
style_context_codestyle: 690
label: 0
"""simple docstring""" from timeit import timeit lowerCAmelCase__ ={ "MALAYALAM": True, "String": False, "rotor": True, "level": True, "A": True, "BB": True, "ABC": False, "amanaplanacanalpanama": True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def _a ( UpperCAmelCase__ ) -> bool: __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def _a ( UpperCAmelCase__ ) -> bool: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) // 2 __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(UpperCAmelCase__ ) ) def _a ( UpperCAmelCase__ ) -> bool: if len(UpperCAmelCase__ ) <= 2: return True if s[0] == s[len(UpperCAmelCase__ ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def _a ( UpperCAmelCase__ ) -> bool: return s == s[::-1] def _a ( UpperCAmelCase__ ) -> None: __SCREAMING_SNAKE_CASE = f"""all({name}(key) is value for key, value in test_data.items())""" __SCREAMING_SNAKE_CASE = f"""from __main__ import test_data, {name}""" __SCREAMING_SNAKE_CASE = 50_00_00 __SCREAMING_SNAKE_CASE = timeit(stmt=UpperCAmelCase__ , setup=UpperCAmelCase__ , number=UpperCAmelCase__ ) print(f"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(F'''{key:21} {value}''') print("a man a plan a canal panama") # finished 500,000 runs in 0.46793 seconds benchmark_function("is_palindrome_slice") # finished 500,000 runs in 0.85234 seconds benchmark_function("is_palindrome") # finished 500,000 runs in 1.32028 seconds benchmark_function("is_palindrome_recursive") # finished 500,000 runs in 2.08679 seconds benchmark_function("is_palindrome_traversal")
code_codestyle: 702
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = XLMRobertaTokenizer lowerCAmelCase = XLMRobertaTokenizerFast lowerCAmelCase = True lowerCAmelCase = True def _a ( self : Optional[int] ) -> List[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<pad>''' __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_02 ) def _a ( self : Tuple ) -> Optional[int]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def _a ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', 
'''<unk>''', '''.''', ] , ) def _a ( self : int ) -> Tuple: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it save with the same files self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) shutil.rmtree(__SCREAMING_SNAKE_CASE ) @cached_property def _a ( self : Union[str, Any] ) -> List[str]: """simple 
docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pickle.dumps(__SCREAMING_SNAKE_CASE ) pickle.loads(__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = '''I was born in 92000, and this is falsé.''' __SCREAMING_SNAKE_CASE = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = tokenizer.encode(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @slow def _a ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = '''Hello World!''' __SCREAMING_SNAKE_CASE = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __SCREAMING_SNAKE_CASE = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def _a ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
690
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''pixel_values'''] def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_55 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> None: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 2_56} __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = resample __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) __SCREAMING_SNAKE_CASE = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE ) return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE ) return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[Any] ) -> np.ndarray: """simple docstring""" return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> np.ndarray: """simple docstring""" return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[float] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Tuple , ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE = size if size is not None else self.size __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size __SCREAMING_SNAKE_CASE = get_size_dict(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE = 
make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __SCREAMING_SNAKE_CASE = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: __SCREAMING_SNAKE_CASE = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images] __SCREAMING_SNAKE_CASE = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images] __SCREAMING_SNAKE_CASE = {'''pixel_values''': images} return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
703
"""simple docstring""" from __future__ import annotations lowerCAmelCase__ =8.9_8_8E9 # units = N * m^s * C^-2 def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> dict[str, float]: __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(UpperCAmelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
690
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__( __magic_name__ , unittest.TestCase ): lowerCAmelCase = KandinskyVaaPipeline lowerCAmelCase = [ '''image_embeds''', '''negative_image_embeds''', ] lowerCAmelCase = ['''image_embeds''', '''negative_image_embeds'''] lowerCAmelCase = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCAmelCase = False @property def _a ( self : str ) -> Union[str, Any]: """simple docstring""" return 32 @property def _a ( self : List[str] ) -> Dict: """simple docstring""" return 32 @property def _a ( self : int ) -> Optional[Any]: """simple docstring""" return self.time_input_dim @property def _a ( self : Dict ) -> Any: """simple docstring""" return self.time_input_dim * 4 @property def _a ( self : List[str] ) -> Any: """simple docstring""" return 1_00 @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __SCREAMING_SNAKE_CASE = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE ) return model @property def _a ( self : Dict ) -> str: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _a ( self : Dict ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : Dict ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_unet __SCREAMING_SNAKE_CASE = self.dummy_movq __SCREAMING_SNAKE_CASE = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _a ( self : int , 
__SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any=0 ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __SCREAMING_SNAKE_CASE ) if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def _a ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = '''cpu''' __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = pipe( **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : Optional[int] ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' ) __SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = KandinskyVaaPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = pipeline.to(__SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = '''red cat, 4k photo''' __SCREAMING_SNAKE_CASE = torch.Generator(device='''cuda''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pipe_prior( __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() __SCREAMING_SNAKE_CASE = torch.Generator(device='''cuda''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipeline( image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , 
num_inference_steps=1_00 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
704
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ =logging.get_logger(__name__) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) if "model" in sd.keys(): __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , map_location='''cpu''' )['''model'''] # pop unnecessary weights __SCREAMING_SNAKE_CASE = [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __SCREAMING_SNAKE_CASE = sd.pop(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __SCREAMING_SNAKE_CASE = sd[key] # We split QKV in separate Q,K,V __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.q_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.k_proj.''' ) __SCREAMING_SNAKE_CASE = key.replace('''.qkv_proj.''' , '''.v_proj.''' ) __SCREAMING_SNAKE_CASE = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.split(UpperCAmelCase__ , depth // 3 , dim=0 ) __SCREAMING_SNAKE_CASE = q __SCREAMING_SNAKE_CASE = k __SCREAMING_SNAKE_CASE = v del sd[key] return sd @torch.no_grad() def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = load_checkpoint(UpperCAmelCase__ ) if config is not None: __SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = OPTConfig() __SCREAMING_SNAKE_CASE = OPTModel(UpperCAmelCase__ ).half().eval() model.load_state_dict(UpperCAmelCase__ ) # Check results Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") lowerCAmelCase__ =parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
690
0
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
lowerCAmelCase__ =pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
lowerCAmelCase__ =dataset.iloc[:, 1:2].values
lowerCAmelCase__ =dataset.iloc[:, 2].values

lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ =train_test_split(X, y, test_size=0.2, random_state=0)

lowerCAmelCase__ =PolynomialFeatures(degree=4)
lowerCAmelCase__ =poly_reg.fit_transform(X)
lowerCAmelCase__ =LinearRegression()
pol_reg.fit(X_poly, y)


def _a ( ) -> List[Any]:
    plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' )
    plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color='''blue''' )
    plt.title('''Truth or Bluff (Linear Regression)''' )
    plt.xlabel('''Position level''' )
    plt.ylabel('''Salary''' )
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
705
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A__( __magic_name__ ): lowerCAmelCase = '''naver-clova-ix/donut-base-finetuned-docvqa''' lowerCAmelCase = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) lowerCAmelCase = '''document_qa''' lowerCAmelCase = AutoProcessor lowerCAmelCase = VisionEncoderDecoderModel lowerCAmelCase = ['''image''', '''text'''] lowerCAmelCase = ['''text'''] def __init__( self : str , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : "Image" , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __SCREAMING_SNAKE_CASE = task_prompt.replace('''{user_input}''' , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.pre_processor.tokenizer( __SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE = self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__SCREAMING_SNAKE_CASE , ).sequences def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __SCREAMING_SNAKE_CASE = re.sub(r'''<.*?>''' , '''''' , __SCREAMING_SNAKE_CASE , count=1 ).strip() 
# remove first task start token __SCREAMING_SNAKE_CASE = self.pre_processor.tokenajson(__SCREAMING_SNAKE_CASE ) return sequence["answer"]
690
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): def __init__( self : int , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Tuple ) -> None: """simple docstring""" warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , ) super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
706
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__( unittest.TestCase ): @property def _a ( self : Optional[Any] ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _a ( self : str ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A__( unittest.TestCase ): def _a ( self : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = '''google/ncsnpp-celebahq-256''' __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = KarrasVeScheduler() __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __SCREAMING_SNAKE_CASE = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
690
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"], "tokenization_canine": ["CanineTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST", "CanineForMultipleChoice", "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "spiece.model"} lowerCAmelCase__ ={ "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } lowerCAmelCase__ ={ "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Dict , ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs __SCREAMING_SNAKE_CASE = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __SCREAMING_SNAKE_CASE = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __SCREAMING_SNAKE_CASE = '''<|endoftext|>''' if eos_token is None else eos_token __SCREAMING_SNAKE_CASE = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __SCREAMING_SNAKE_CASE = unk_token if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = eos_token if bos_token is None else bos_token else: __SCREAMING_SNAKE_CASE = '''<pad>''' if pad_token is None else pad_token __SCREAMING_SNAKE_CASE = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = remove_space __SCREAMING_SNAKE_CASE = keep_accents __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # Used for whitespace 
normalization in input texts # fmt : off __SCREAMING_SNAKE_CASE = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing __SCREAMING_SNAKE_CASE = re.compile( f"""[{"".join(map(__SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]""" ) def __getstate__( self : List[str] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.__dict__.copy() __SCREAMING_SNAKE_CASE = None return state def __setstate__( self : int , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _a ( self : Optional[Any] ) -> int: """simple docstring""" return len(self.sp_model ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.non_printing_characters_re.sub('''''' , __SCREAMING_SNAKE_CASE ) # Normalize whitespaces __SCREAMING_SNAKE_CASE = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __SCREAMING_SNAKE_CASE = unicodedata.normalize('''NFC''' , __SCREAMING_SNAKE_CASE ) return text def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int: """simple docstring""" return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE ) @staticmethod def _a ( __SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" return out_string def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = '''''' __SCREAMING_SNAKE_CASE = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = False out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string def _a ( self : Union[str, Any] ) -> Dict[str, int]: """simple docstring""" __SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = [self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text] __SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ) return token_ids def _a ( self : Any , __SCREAMING_SNAKE_CASE : Union[int, List[int]] ) -> str: """simple docstring""" return self.sp_model.decode(__SCREAMING_SNAKE_CASE ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : "Conversation" ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __SCREAMING_SNAKE_CASE = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__SCREAMING_SNAKE_CASE ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=__SCREAMING_SNAKE_CASE )
690
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


lowerCAmelCase__ ={"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ =["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ =["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent lowerCAmelCase__ ={"UserAgent": UserAgent().random} def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = script.contents[0] __SCREAMING_SNAKE_CASE = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class A__: def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = f"""https://www.instagram.com/{username}/""" __SCREAMING_SNAKE_CASE = self.get_json() def _a ( self : List[Any] ) -> dict: """simple docstring""" __SCREAMING_SNAKE_CASE = requests.get(self.url , headers=__SCREAMING_SNAKE_CASE ).text __SCREAMING_SNAKE_CASE = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Tuple ) -> str: """simple docstring""" return f"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: """simple docstring""" return f"""{self.fullname} ({self.username}) is {self.biography}""" @property def _a ( self : Tuple ) -> str: """simple docstring""" return self.user_data["username"] @property def _a ( self : List[Any] ) -> str: """simple docstring""" return self.user_data["full_name"] @property def _a ( self : Optional[Any] ) -> str: """simple docstring""" return self.user_data["biography"] @property def _a ( self : List[str] ) -> str: """simple docstring""" return self.user_data["business_email"] @property def _a ( self : Any ) -> str: """simple docstring""" return self.user_data["external_url"] @property def _a ( self : Any ) -> int: """simple docstring""" return self.user_data["edge_followed_by"]["count"] @property def _a ( self : Dict ) -> int: """simple docstring""" return self.user_data["edge_follow"]["count"] @property def _a ( self : str ) -> int: """simple docstring""" return self.user_data["edge_owner_to_timeline_media"]["count"] @property def _a ( self : Union[str, Any] ) -> str: """simple docstring""" return self.user_data["profile_pic_url_hd"] @property def _a ( self : Tuple ) -> bool: """simple docstring""" return self.user_data["is_verified"] @property def _a ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.user_data["is_private"] def _a ( UpperCAmelCase__ = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions __SCREAMING_SNAKE_CASE = InstagramUser(UpperCAmelCase__ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , UpperCAmelCase__ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "[email protected]" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ =InstagramUser("github") print(instagram_user) print(F'''{instagram_user.number_of_posts = }''') print(F'''{instagram_user.number_of_followers = }''') print(F'''{instagram_user.number_of_followings = }''') print(F'''{instagram_user.email = }''') print(F'''{instagram_user.website = }''') print(F'''{instagram_user.profile_picture_url = }''') print(F'''{instagram_user.is_verified = }''') print(F'''{instagram_user.is_private = }''')
690
0
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(".") def _a ( UpperCAmelCase__ ) -> Dict: __SCREAMING_SNAKE_CASE = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' f"""{test_file} instead.""" ) __SCREAMING_SNAKE_CASE = components[-1] if not test_fn.endswith('''py''' ): raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" ) __SCREAMING_SNAKE_CASE = components[:-1] + [test_fn.replace('''.py''' , '''''' )] __SCREAMING_SNAKE_CASE = '''.'''.join(UpperCAmelCase__ ) return test_module_path def _a ( UpperCAmelCase__ ) -> Any: __SCREAMING_SNAKE_CASE = get_module_path(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = importlib.import_module(UpperCAmelCase__ ) return test_module def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(UpperCAmelCase__ ) for attr in dir(UpperCAmelCase__ ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) # sort with class names return sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x.__name__ ) def _a ( UpperCAmelCase__ ) -> Any: __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = get_test_module(UpperCAmelCase__ ) for attr in dir(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). __SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , '''all_model_classes''' , [] ) if len(UpperCAmelCase__ ) > 0: test_classes.append(UpperCAmelCase__ ) # sort with class names return sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x.__name__ ) def _a ( UpperCAmelCase__ ) -> List[str]: __SCREAMING_SNAKE_CASE = get_test_classes(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x.__name__ ) def _a ( UpperCAmelCase__ ) -> str: __SCREAMING_SNAKE_CASE = test_class() if hasattr(UpperCAmelCase__ , '''setUp''' ): test.setUp() __SCREAMING_SNAKE_CASE = None if hasattr(UpperCAmelCase__ , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: __SCREAMING_SNAKE_CASE = test.model_tester.__class__ return model_tester def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> int: __SCREAMING_SNAKE_CASE = get_test_classes(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(UpperCAmelCase__ ) # sort with class names return sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x.__name__ ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]: __SCREAMING_SNAKE_CASE = get_test_classes_for_model(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] for test_class in test_classes: __SCREAMING_SNAKE_CASE = get_model_tester_from_test_class(UpperCAmelCase__ ) if tester_class is not None: tester_classes.append(UpperCAmelCase__ ) # sort with class names return sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x.__name__ ) def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = get_test_classes(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = {test_class: get_model_tester_from_test_class(UpperCAmelCase__ ) for test_class in test_classes} return test_tester_mapping def _a ( UpperCAmelCase__ ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = get_model_classes(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { model_class: get_test_classes_for_model(UpperCAmelCase__ , UpperCAmelCase__ ) for model_class in model_classes } return model_test_mapping def _a ( UpperCAmelCase__ ) -> Tuple: __SCREAMING_SNAKE_CASE = get_model_classes(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { model_class: get_tester_classes_for_model(UpperCAmelCase__ , UpperCAmelCase__ ) for model_class in model_classes } return model_to_tester_mapping def _a ( UpperCAmelCase__ ) -> Union[str, Any]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return o elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return o.__name__ elif isinstance(UpperCAmelCase__ , (list, tuple) ): return [to_json(UpperCAmelCase__ ) for x in o] elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return {to_json(UpperCAmelCase__ ): to_json(UpperCAmelCase__ ) for k, v in o.items()} else: return o
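A small usage sketch, not part of the dataset row: the sample's function names are masked, so the module name (get_test_info) and the de-masked helper names below are assumptions based on how such introspection utilities are typically exposed; it also assumes it is run from the repository root, as the sys.path.append(".") at the top of the sample suggests.

# Hypothetical invocation of the introspection helpers defined above
# (module, helper names, and test file path are placeholders, not taken from the row):
from get_test_info import get_model_to_tester_mapping, get_test_to_tester_mapping

test_file = "tests/models/bert/test_modeling_bert.py"
print(get_model_to_tester_mapping(test_file))  # maps each model class to its tester classes
print(get_test_to_tester_mapping(test_file))   # maps each test class to its model tester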
709
"""simple docstring""" from sklearn.metrics import recall_score import datasets lowerCAmelCase__ ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n" lowerCAmelCase__ ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n" lowerCAmelCase__ ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__( datasets.Metric ): def _a ( self : Any ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]="binary" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]="warn" , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = recall_score( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , pos_label=__SCREAMING_SNAKE_CASE , average=__SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , zero_division=__SCREAMING_SNAKE_CASE , ) return {"recall": float(__SCREAMING_SNAKE_CASE ) if score.size == 1 else score}
690
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} lowerCAmelCase__ ={ "vocab_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt", }, "tokenizer_file": { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json" ), "google/realm-orqa-nq-openqa": ( "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-nq-reader": ( "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-openqa": ( "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json" ), "google/realm-orqa-wq-reader": ( "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json" ), }, } lowerCAmelCase__ ={ "google/realm-cc-news-pretrained-embedder": 512, "google/realm-cc-news-pretrained-encoder": 512, "google/realm-cc-news-pretrained-scorer": 512, "google/realm-cc-news-pretrained-openqa": 512, "google/realm-orqa-nq-openqa": 512, "google/realm-orqa-nq-reader": 512, "google/realm-orqa-wq-openqa": 512, "google/realm-orqa-wq-reader": 512, } lowerCAmelCase__ ={ "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True}, "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True}, "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-openqa": {"do_lower_case": True}, "google/realm-orqa-nq-reader": {"do_lower_case": True}, "google/realm-orqa-wq-openqa": {"do_lower_case": True}, "google/realm-orqa-wq-reader": {"do_lower_case": True}, } 
class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = RealmTokenizer def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]="[UNK]" , __SCREAMING_SNAKE_CASE : Dict="[SEP]" , __SCREAMING_SNAKE_CASE : Optional[Any]="[PAD]" , __SCREAMING_SNAKE_CASE : Tuple="[CLS]" , __SCREAMING_SNAKE_CASE : List[Any]="[MASK]" , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Dict , ) -> str: """simple docstring""" super().__init__( __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE = getattr(__SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = strip_accents __SCREAMING_SNAKE_CASE = tokenize_chinese_chars __SCREAMING_SNAKE_CASE = normalizer_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = do_lower_case def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Dict ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH __SCREAMING_SNAKE_CASE = text __SCREAMING_SNAKE_CASE = kwargs.pop('''text_pair''' , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = kwargs.pop('''return_tensors''' , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''input_ids''': [], '''attention_mask''': [], '''token_type_ids''': [], } for idx, candidate_text in enumerate(__SCREAMING_SNAKE_CASE ): if batch_text_pair is not None: __SCREAMING_SNAKE_CASE = batch_text_pair[idx] else: __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = super().__call__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = encoded_candidates.get('''input_ids''' ) __SCREAMING_SNAKE_CASE = encoded_candidates.get('''attention_mask''' ) __SCREAMING_SNAKE_CASE = encoded_candidates.get('''token_type_ids''' ) if encoded_input_ids is not None: output_data["input_ids"].append(__SCREAMING_SNAKE_CASE ) if encoded_attention_mask is not None: output_data["attention_mask"].append(__SCREAMING_SNAKE_CASE ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = {key: item for key, item in output_data.items() if len(__SCREAMING_SNAKE_CASE ) != 0} return BatchEncoding(__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=None ) -> List[Any]: """simple docstring""" 
__SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE ) return tuple(__SCREAMING_SNAKE_CASE )
710
"""simple docstring""" def _a ( UpperCAmelCase__ = 10**9 ) -> int: __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) lowerCAmelCase__ =logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase__ =argparse.ArgumentParser( description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" ) parser.add_argument( "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." ) parser.add_argument( "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." ) parser.add_argument("--vocab_size", default=30_522, type=int) lowerCAmelCase__ =parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, "rb") as fp: lowerCAmelCase__ =pickle.load(fp) logger.info("Counting occurrences for MLM.") lowerCAmelCase__ =Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase__ =[0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase__ =v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, "wb") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
711
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase__ =pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) lowerCAmelCase__ =dataset.iloc[:, 1:2].values lowerCAmelCase__ =dataset.iloc[:, 2].values lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ =train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase__ =PolynomialFeatures(degree=4) lowerCAmelCase__ =poly_reg.fit_transform(X) lowerCAmelCase__ =LinearRegression() pol_reg.fit(X_poly, y) def _a ( ) -> List[Any]: plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='''red''' ) plt.plot(UpperCAmelCase__ , pol_reg.predict(poly_reg.fit_transform(UpperCAmelCase__ ) ) , color='''blue''' ) plt.title('''Truth or Bluff (Linear Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
690
0
"""simple docstring""" import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class A__( unittest.TestCase ): def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = jnp.ones((batch_size, length) ) / length return scores def _a ( self : int ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = 20 __SCREAMING_SNAKE_CASE = self._get_uniform_logits(batch_size=2 , length=__SCREAMING_SNAKE_CASE ) # tweak scores to not be uniform anymore __SCREAMING_SNAKE_CASE = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch __SCREAMING_SNAKE_CASE = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax __SCREAMING_SNAKE_CASE = jax.nn.softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = FlaxTemperatureLogitsWarper(temperature=0.5 ) __SCREAMING_SNAKE_CASE = FlaxTemperatureLogitsWarper(temperature=1.3 ) __SCREAMING_SNAKE_CASE = jax.nn.softmax(temp_dist_warper_sharper(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 ) __SCREAMING_SNAKE_CASE = jax.nn.softmax(temp_dist_warper_smoother(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = 2 # create ramp distribution __SCREAMING_SNAKE_CASE = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() __SCREAMING_SNAKE_CASE = ramp_logits[1:, : vocab_size // 2] + vocab_size __SCREAMING_SNAKE_CASE = FlaxTopKLogitsWarper(3 ) __SCREAMING_SNAKE_CASE = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case __SCREAMING_SNAKE_CASE = 5 __SCREAMING_SNAKE_CASE = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) __SCREAMING_SNAKE_CASE = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy() __SCREAMING_SNAKE_CASE = top_k_warp_safety_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 
0.0).sum(axis=-1 ).tolist() , [2, 2] ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) __SCREAMING_SNAKE_CASE = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) __SCREAMING_SNAKE_CASE = FlaxTopPLogitsWarper(0.8 ) __SCREAMING_SNAKE_CASE = np.exp(top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 __SCREAMING_SNAKE_CASE = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # check edge cases with negative and extreme logits __SCREAMING_SNAKE_CASE = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme __SCREAMING_SNAKE_CASE = ramp_logits[1] * 1_00.0 # make sure at least 2 tokens are kept __SCREAMING_SNAKE_CASE = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) __SCREAMING_SNAKE_CASE = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def _a ( self : Any ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = 20 __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE ) # check that min length is applied at length 5 __SCREAMING_SNAKE_CASE = ids_tensor((batch_size, 20) , vocab_size=20 ) __SCREAMING_SNAKE_CASE = 5 __SCREAMING_SNAKE_CASE = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] ) # check that min length is not applied anymore at length 15 __SCREAMING_SNAKE_CASE = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = 15 __SCREAMING_SNAKE_CASE = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() ) def _a ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 20 __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE ) # check that all scores are -inf except the bos_token_id score __SCREAMING_SNAKE_CASE = ids_tensor((batch_size, 1) , vocab_size=20 ) __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = 
self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() ) def _a ( self : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = 20 __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 5 __SCREAMING_SNAKE_CASE = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) # check that all scores are -inf except the eos_token_id when max_length is reached __SCREAMING_SNAKE_CASE = ids_tensor((batch_size, 4) , vocab_size=20 ) __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = 15 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 15 # dummy input_ids and scores __SCREAMING_SNAKE_CASE = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = input_ids.copy() __SCREAMING_SNAKE_CASE = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = scores.copy() # instantiate all dist processors __SCREAMING_SNAKE_CASE = FlaxTemperatureLogitsWarper(temperature=0.5 ) __SCREAMING_SNAKE_CASE = FlaxTopKLogitsWarper(3 ) __SCREAMING_SNAKE_CASE = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors __SCREAMING_SNAKE_CASE = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = 10 # no processor list __SCREAMING_SNAKE_CASE = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # with processor list __SCREAMING_SNAKE_CASE = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, 
eos_dist_proc] ) __SCREAMING_SNAKE_CASE = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) # scores should be equal self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def _a ( self : int ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = 15 __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 15 # dummy input_ids and scores __SCREAMING_SNAKE_CASE = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = input_ids.copy() __SCREAMING_SNAKE_CASE = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = scores.copy() # instantiate all dist processors __SCREAMING_SNAKE_CASE = FlaxTemperatureLogitsWarper(temperature=0.5 ) __SCREAMING_SNAKE_CASE = FlaxTopKLogitsWarper(3 ) __SCREAMING_SNAKE_CASE = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors __SCREAMING_SNAKE_CASE = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = 10 # no processor list def run_no_processor_list(__SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ): __SCREAMING_SNAKE_CASE = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) return scores # with processor list def run_processor_list(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] ): __SCREAMING_SNAKE_CASE = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) __SCREAMING_SNAKE_CASE = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) return scores __SCREAMING_SNAKE_CASE = jax.jit(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jax.jit(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jitted_run_no_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jitted_run_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # scores should be equal self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class A__: lowerCAmelCase = MBartConfig lowerCAmelCase = {} lowerCAmelCase = '''gelu''' def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : List[str]=37 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Any=20 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() __SCREAMING_SNAKE_CASE = 
inputs_dict['''input_ids'''] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''attention_mask'''][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict['''head_mask'''] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ) -> Optional[int]: if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__( __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _a ( self : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class A__( unittest.TestCase ): lowerCAmelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] lowerCAmelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] lowerCAmelCase = '''facebook/mbart-large-en-ro''' @cached_property def _a ( self : Optional[int] ) -> str: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE ) def _a ( self : Any , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) return generated_words @slow def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self._assert_generated_batch_equal_expected()
690
0
"""simple docstring""" import math def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> float: if initial_intensity < 0: raise ValueError('''The value of intensity cannot be negative''' ) # handling of negative values of initial intensity if angle < 0 or angle > 3_60: raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(UpperCAmelCase__ ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="malus_law")
713
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={ "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class A__( __magic_name__ ): lowerCAmelCase = '''van''' def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=2_24 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Tuple=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE : Optional[int]=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE : str=[64, 1_28, 3_20, 5_12] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 3, 12, 3] , __SCREAMING_SNAKE_CASE : Dict=[8, 8, 4, 4] , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-6 , __SCREAMING_SNAKE_CASE : Any=1E-2 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : str , ) -> List[str]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_sizes __SCREAMING_SNAKE_CASE = strides __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = mlp_ratios __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = layer_scale_init_value __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = dropout_rate
690
0
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
714
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ ={"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def _a ( UpperCAmelCase__ ) -> Dict: __SCREAMING_SNAKE_CASE = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class A__( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowerCAmelCase = StableDiffusionLatentUpscalePipeline lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''height''', '''width''', '''cross_attention_kwargs''', '''negative_prompt_embeds''', '''prompt_embeds''', } lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''} lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCAmelCase = frozenset([] ) lowerCAmelCase = True @property def _a ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = (16, 16) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE ) return image def _a ( self : Dict ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=__SCREAMING_SNAKE_CASE , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=__SCREAMING_SNAKE_CASE , only_cross_attention=__SCREAMING_SNAKE_CASE , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) __SCREAMING_SNAKE_CASE = EulerDiscreteScheduler(prediction_type='''sample''' ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = 
CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict=0 ) -> int: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def _a ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = '''cpu''' __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = pipe(**__SCREAMING_SNAKE_CASE ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 2_56, 2_56, 3) ) __SCREAMING_SNAKE_CASE = np.array( [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] ) __SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 ) def _a ( self : Dict ) -> List[str]: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def _a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def _a ( self : Union[str, Any] ) -> List[str]: """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def _a ( self : Optional[int] ) -> str: """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def _a ( self : Tuple ) -> Optional[Any]: """simple docstring""" super().test_save_load_local(expected_max_difference=3E-3 ) def _a ( self : int ) -> Union[str, Any]: """simple docstring""" super().test_save_load_optional_components(expected_max_difference=3E-3 ) def _a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no 
sigma schedulers are not supported # no schedulers continue __SCREAMING_SNAKE_CASE = getattr(__SCREAMING_SNAKE_CASE , scheduler_enum.name ) __SCREAMING_SNAKE_CASE = scheduler_cls.from_config(pipe.scheduler.config ) __SCREAMING_SNAKE_CASE = pipe(**__SCREAMING_SNAKE_CASE )[0] outputs.append(__SCREAMING_SNAKE_CASE ) assert check_same_shape(__SCREAMING_SNAKE_CASE ) @require_torch_gpu @slow class A__( unittest.TestCase ): def _a ( self : Tuple ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.manual_seed(33 ) __SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) __SCREAMING_SNAKE_CASE = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) __SCREAMING_SNAKE_CASE = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' __SCREAMING_SNAKE_CASE = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , output_type='''latent''' ).images __SCREAMING_SNAKE_CASE = upscaler( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=20 , guidance_scale=0 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , ).images[0] __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5E-2 def _a ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.manual_seed(33 ) __SCREAMING_SNAKE_CASE = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) __SCREAMING_SNAKE_CASE = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' __SCREAMING_SNAKE_CASE = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) __SCREAMING_SNAKE_CASE = upscaler( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , num_inference_steps=20 , guidance_scale=0 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , ).images[0] __SCREAMING_SNAKE_CASE = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-2
715
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class A__: def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : Tuple=64 , __SCREAMING_SNAKE_CASE : str=None ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = np.random.default_rng(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = length __SCREAMING_SNAKE_CASE = rng.normal(size=(length,) ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.length def __getitem__( self : int , __SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: """simple docstring""" return {"x": self.x[i], "y": self.y[i]} class A__( torch.nn.Module ): def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : List[str]=False ) -> List[Any]: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __SCREAMING_SNAKE_CASE = True def _a ( self : int , __SCREAMING_SNAKE_CASE : int=None ) -> List[str]: """simple docstring""" if self.first_batch: print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) __SCREAMING_SNAKE_CASE = False return x * self.a[0] + self.b[0] class A__( torch.nn.Module ): def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , __SCREAMING_SNAKE_CASE : List[str]=0 , __SCREAMING_SNAKE_CASE : int=False ) -> Optional[int]: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor(__SCREAMING_SNAKE_CASE ).float() ) __SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor(__SCREAMING_SNAKE_CASE ).float() ) __SCREAMING_SNAKE_CASE = True def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> List[Any]: """simple docstring""" if self.first_batch: print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}""" ) __SCREAMING_SNAKE_CASE = False return x * self.a + self.b def _a ( UpperCAmelCase__ , UpperCAmelCase__ = 16 ) -> int: from datasets import load_dataset from transformers import AutoTokenizer __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __SCREAMING_SNAKE_CASE = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __SCREAMING_SNAKE_CASE = load_dataset('''csv''' , data_files=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = datasets['''train'''].unique('''label''' ) __SCREAMING_SNAKE_CASE = {v: i for i, v in enumerate(UpperCAmelCase__ )} def tokenize_function(UpperCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' ) if "label" in examples: __SCREAMING_SNAKE_CASE = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __SCREAMING_SNAKE_CASE = datasets.map( UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(UpperCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCAmelCase__ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' ) return tokenizer.pad(UpperCAmelCase__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __SCREAMING_SNAKE_CASE = DataLoader(tokenized_datasets['''train'''] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=2 ) __SCREAMING_SNAKE_CASE = DataLoader(tokenized_datasets['''validation'''] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
716
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A__( unittest.TestCase ): def _a ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[str] ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : str ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = CLIPConfig() # Create a dummy config file with image_proceesor_type __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict() config_dict.pop('''image_processor_type''' ) __SCREAMING_SNAKE_CASE = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE ) # save in new folder model_config.save_pretrained(__SCREAMING_SNAKE_CASE ) config.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # make sure private variable is not incorrectly saved __SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) 
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> str: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''clip-base''' ) def _a ( self : Any ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def _a ( self : Dict ) -> Dict: """simple docstring""" with self.assertRaisesRegex( __SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _a ( self : int ) -> Any: """simple docstring""" with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _a ( self : Optional[Any] ) -> str: """simple docstring""" try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__SCREAMING_SNAKE_CASE ): AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json''' __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) ) __SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _a ( self : int ) -> List[Any]: """simple docstring""" class A__( __magic_name__ ): lowerCAmelCase = True try: AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE ) AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub __SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
690
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm lowerCAmelCase__ =logging.get_logger(__name__) @dataclass class A__( __magic_name__ ): lowerCAmelCase = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : int , **__SCREAMING_SNAKE_CASE : List[str] ) -> Dict: """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __SCREAMING_SNAKE_CASE = deprecated_arg[3:] setattr(self , __SCREAMING_SNAKE_CASE , not kwargs.pop(__SCREAMING_SNAKE_CASE ) ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) __SCREAMING_SNAKE_CASE = kwargs.pop('''torchscript''' , self.torchscript ) __SCREAMING_SNAKE_CASE = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) __SCREAMING_SNAKE_CASE = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''Trace the models using torchscript'''} ) lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} ) lowerCAmelCase = field( default='''O1''' , metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) } , ) @cached_property def _a ( self : int ) -> Tuple["torch.device", int]: """simple docstring""" requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: __SCREAMING_SNAKE_CASE = torch.device('''cpu''' ) __SCREAMING_SNAKE_CASE = 0 elif is_torch_tpu_available(): __SCREAMING_SNAKE_CASE = xm.xla_device() __SCREAMING_SNAKE_CASE = 0 else: __SCREAMING_SNAKE_CASE = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) __SCREAMING_SNAKE_CASE = torch.cuda.device_count() return device, n_gpu @property def _a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return is_torch_tpu_available() and self.tpu @property def _a ( self : Any ) -> int: """simple docstring""" requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def _a ( self : Optional[int] ) -> "torch.device": """simple docstring""" requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def _a ( self : Any ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def _a ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return self.n_gpu > 0
717
"""simple docstring""" import math lowerCAmelCase__ =10 lowerCAmelCase__ =7 lowerCAmelCase__ =BALLS_PER_COLOUR * NUM_COLOURS def _a ( UpperCAmelCase__ = 20 ) -> str: __SCREAMING_SNAKE_CASE = math.comb(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
690
0
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=True , UpperCAmelCase__="pt" ) -> str: __SCREAMING_SNAKE_CASE = {'''add_prefix_space''': True} if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not line.startswith(''' ''' ) else {} __SCREAMING_SNAKE_CASE = padding_side return tokenizer( [line] , max_length=UpperCAmelCase__ , padding='''max_length''' if pad_to_max_length else None , truncation=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = input_ids.ne(UpperCAmelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class A__( __magic_name__ ): def __init__( self : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple="train" , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str="" , ) -> Union[str, Any]: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.source''' ) __SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ).joinpath(type_path + '''.target''' ) __SCREAMING_SNAKE_CASE = self.get_char_lens(self.src_file ) __SCREAMING_SNAKE_CASE = max_source_length __SCREAMING_SNAKE_CASE = max_target_length assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}""" __SCREAMING_SNAKE_CASE = tokenizer __SCREAMING_SNAKE_CASE = prefix if n_obs is not None: __SCREAMING_SNAKE_CASE = self.src_lens[:n_obs] __SCREAMING_SNAKE_CASE = src_lang __SCREAMING_SNAKE_CASE = tgt_lang def __len__( self : List[str] ) -> Optional[Any]: """simple docstring""" return len(self.src_lens ) def __getitem__( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict[str, torch.Tensor]: """simple docstring""" __SCREAMING_SNAKE_CASE = index + 1 # linecache starts at 1 __SCREAMING_SNAKE_CASE = self.prefix + linecache.getline(str(self.src_file ) , __SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) __SCREAMING_SNAKE_CASE = linecache.getline(str(self.tgt_file ) , __SCREAMING_SNAKE_CASE ).rstrip('''\n''' ) assert source_line, f"""empty source line for index {index}""" assert tgt_line, f"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __SCREAMING_SNAKE_CASE = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer ) __SCREAMING_SNAKE_CASE = self.tokenizer.generator if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer __SCREAMING_SNAKE_CASE = encode_line(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.max_source_length , '''right''' ) 
__SCREAMING_SNAKE_CASE = encode_line(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.max_target_length , '''right''' ) __SCREAMING_SNAKE_CASE = source_inputs['''input_ids'''].squeeze() __SCREAMING_SNAKE_CASE = target_inputs['''input_ids'''].squeeze() __SCREAMING_SNAKE_CASE = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _a ( __SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: """simple docstring""" return [len(__SCREAMING_SNAKE_CASE ) for x in Path(__SCREAMING_SNAKE_CASE ).open().readlines()] def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> Dict[str, torch.Tensor]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.stack([x['''input_ids'''] for x in batch] ) __SCREAMING_SNAKE_CASE = torch.stack([x['''attention_mask'''] for x in batch] ) __SCREAMING_SNAKE_CASE = torch.stack([x['''decoder_input_ids'''] for x in batch] ) __SCREAMING_SNAKE_CASE = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) __SCREAMING_SNAKE_CASE = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ) else self.tokenizer.pad_token_id ) __SCREAMING_SNAKE_CASE = trim_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = trim_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch lowerCAmelCase__ =getLogger(__name__) def _a ( UpperCAmelCase__ ) -> List[str]: return list(itertools.chain.from_iterable(UpperCAmelCase__ ) ) def _a ( UpperCAmelCase__ ) -> None: __SCREAMING_SNAKE_CASE = get_git_info() save_json(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , '''git_log.json''' ) ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=4 , **UpperCAmelCase__ ) -> str: with open(UpperCAmelCase__ , '''w''' ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , indent=UpperCAmelCase__ , **UpperCAmelCase__ ) def _a ( UpperCAmelCase__ ) -> List[Any]: with open(UpperCAmelCase__ ) as f: return json.load(UpperCAmelCase__ ) def _a ( ) -> Optional[int]: __SCREAMING_SNAKE_CASE = git.Repo(search_parent_directories=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = { '''repo_id''': str(UpperCAmelCase__ ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> List: return list(map(UpperCAmelCase__ , UpperCAmelCase__ ) ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]: with open(UpperCAmelCase__ , '''wb''' ) as f: return pickle.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def _a ( UpperCAmelCase__ ) -> List[str]: def remove_articles(UpperCAmelCase__ ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , UpperCAmelCase__ ) def white_space_fix(UpperCAmelCase__ ): return " ".join(text.split() ) def remove_punc(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCAmelCase__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__ ) ) ) ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = normalize_answer(UpperCAmelCase__ 
).split() __SCREAMING_SNAKE_CASE = normalize_answer(UpperCAmelCase__ ).split() __SCREAMING_SNAKE_CASE = Counter(UpperCAmelCase__ ) & Counter(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = sum(common.values() ) if num_same == 0: return 0 __SCREAMING_SNAKE_CASE = 1.0 * num_same / len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 1.0 * num_same / len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall) return fa def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]: return normalize_answer(UpperCAmelCase__ ) == normalize_answer(UpperCAmelCase__ ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict: assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 0 for hypo, pred in zip(UpperCAmelCase__ , UpperCAmelCase__ ): em += exact_match_score(UpperCAmelCase__ , UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 0: em /= len(UpperCAmelCase__ ) return {"em": em} def _a ( UpperCAmelCase__ ) -> Optional[Any]: return model_prefix.startswith('''rag''' ) def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __SCREAMING_SNAKE_CASE = '''dropout_rate''' for p in extra_params: if getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): if not hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) and not hasattr(UpperCAmelCase__ , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(UpperCAmelCase__ ) ) delattr(UpperCAmelCase__ , UpperCAmelCase__ ) continue __SCREAMING_SNAKE_CASE = p if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) else equivalent_param[p] setattr(UpperCAmelCase__ , UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) delattr(UpperCAmelCase__ , UpperCAmelCase__ ) return hparams, config
718
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax lowerCAmelCase__ =logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__( __magic_name__ ): def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' ) __SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] ) ] return result
690
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: lowerCAmelCase__ =None lowerCAmelCase__ =logging.get_logger(__name__) lowerCAmelCase__ ={"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} lowerCAmelCase__ ={ "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", }, "tokenizer_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json", }, } lowerCAmelCase__ ={ "xlnet-base-cased": None, "xlnet-large-cased": None, } lowerCAmelCase__ ="▁" # Segments (not really needed) lowerCAmelCase__ =0 lowerCAmelCase__ =1 lowerCAmelCase__ =2 lowerCAmelCase__ =3 lowerCAmelCase__ =4 class A__( __magic_name__ ): lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = '''left''' lowerCAmelCase = XLNetTokenizer def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="<unk>" , __SCREAMING_SNAKE_CASE : Optional[int]="<sep>" , __SCREAMING_SNAKE_CASE : str="<pad>" , __SCREAMING_SNAKE_CASE : int="<cls>" , __SCREAMING_SNAKE_CASE : int="<mask>" , __SCREAMING_SNAKE_CASE : List[Any]=["<eop>", "<eod>"] , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = remove_space __SCREAMING_SNAKE_CASE = keep_accents __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = False if not self.vocab_file else True def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _a ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: 
"""simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
719
"""simple docstring""" from __future__ import annotations from collections.abc import Callable lowerCAmelCase__ =list[list[float | int]] def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Matrix: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase__ , UpperCAmelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , UpperCAmelCase__ ): for row in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(UpperCAmelCase__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase__ ) ] def _a ( UpperCAmelCase__ ) -> Callable[[int], int]: __SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(UpperCAmelCase__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(UpperCAmelCase__ ): for col in range(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(UpperCAmelCase__ , UpperCAmelCase__ ) def interpolated_func(UpperCAmelCase__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCAmelCase__ ) ) return interpolated_func def _a ( UpperCAmelCase__ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def _a ( UpperCAmelCase__ = question_function , UpperCAmelCase__ = 10 ) -> int: __SCREAMING_SNAKE_CASE = [func(UpperCAmelCase__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(UpperCAmelCase__ ) == poly(UpperCAmelCase__ ): x_val += 1 ret += poly(UpperCAmelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
690
0
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device lowerCAmelCase__ =False class A__( unittest.TestCase ): pass @nightly @require_torch_gpu class A__( unittest.TestCase ): def _a ( self : str ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) # remove text_unet pipe.remove_unused_weights() pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = '''A painting of a squirrel eating a burger ''' __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained(__SCREAMING_SNAKE_CASE ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = generator.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained( '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = '''A painting of a squirrel eating a burger ''' __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images __SCREAMING_SNAKE_CASE = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) __SCREAMING_SNAKE_CASE = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
720
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _a ( UpperCAmelCase__ = "isbn/0140328726" ) -> dict: __SCREAMING_SNAKE_CASE = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes if new_olid.count('''/''' ) != 1: __SCREAMING_SNAKE_CASE = f"""{olid} is not a valid Open Library olid""" raise ValueError(UpperCAmelCase__ ) return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json() def _a ( UpperCAmelCase__ ) -> dict: __SCREAMING_SNAKE_CASE = { '''title''': '''Title''', '''publish_date''': '''Publish date''', '''authors''': '''Authors''', '''number_of_pages''': '''Number of pages:''', '''first_sentence''': '''First sentence''', '''isbn_10''': '''ISBN (10)''', '''isbn_13''': '''ISBN (13)''', } __SCREAMING_SNAKE_CASE = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __SCREAMING_SNAKE_CASE = [ get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors'''] ] __SCREAMING_SNAKE_CASE = data['''First sentence''']['''value'''] for key, value in data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = ''', '''.join(UpperCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase__ =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(F'''\nSearching Open Library for ISBN: {isbn}...\n''') try: lowerCAmelCase__ =summarize_book(get_openlibrary_data(F'''isbn/{isbn}''')) print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'''Sorry, there are no results for ISBN: {isbn}.''')
690
0
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _a ( UpperCAmelCase__ , UpperCAmelCase__=10 ) -> Any: __SCREAMING_SNAKE_CASE = [] for _ in range(UpperCAmelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _a ( UpperCAmelCase__ , UpperCAmelCase__=10 ) -> Tuple: __SCREAMING_SNAKE_CASE = [] for step in range(UpperCAmelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , '''schedule.bin''' ) torch.save(scheduler.state_dict() , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ ) scheduler.load_state_dict(UpperCAmelCase__ ) return lrs @require_torch class A__( unittest.TestCase ): def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) ) for a, b in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , delta=__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.4, 0.2, -0.5] ) __SCREAMING_SNAKE_CASE = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __SCREAMING_SNAKE_CASE = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_00 ): __SCREAMING_SNAKE_CASE = criterion(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _a ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.4, 0.2, -0.5] ) __SCREAMING_SNAKE_CASE = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __SCREAMING_SNAKE_CASE = Adafactor( params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=__SCREAMING_SNAKE_CASE , scale_parameter=__SCREAMING_SNAKE_CASE , warmup_init=__SCREAMING_SNAKE_CASE , ) for _ in range(10_00 ): __SCREAMING_SNAKE_CASE = criterion(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class A__( unittest.TestCase ): lowerCAmelCase = nn.Linear(50 , 50 ) if is_torch_available() else None lowerCAmelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None lowerCAmelCase = 10 def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str=None ) -> Dict: """simple docstring""" self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) ) for a, b in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , delta=__SCREAMING_SNAKE_CASE , msg=__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) __SCREAMING_SNAKE_CASE = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14], ), } for scheduler_func, data in scheds.items(): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data __SCREAMING_SNAKE_CASE = scheduler_func(self.optimizer , **__SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) __SCREAMING_SNAKE_CASE = unwrap_schedule(__SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tol=1E-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , ) __SCREAMING_SNAKE_CASE = scheduler_func(self.optimizer , **__SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(__SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule __SCREAMING_SNAKE_CASE = unwrap_and_save_reload_schedule(__SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , msg=f"""failed for {scheduler_func} in save and reload""" ) class A__: def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = fn def __call__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Dict ) -> Tuple: """simple docstring""" return self.fn(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @classmethod def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = list(map(self , scheduler.lr_lambdas ) )
721
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ =logging.get_logger(__name__) class A__( __magic_name__ ): lowerCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=1 , __SCREAMING_SNAKE_CASE : List[Any]=[16, 16] , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_28 , __SCREAMING_SNAKE_CASE : int=4_41_00 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Any: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def _a ( self : str , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Tuple , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __SCREAMING_SNAKE_CASE = {'''audio_values''': padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
690
0
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator class A : def __init__( self , snake_case_ ) -> None: _a = value _a = None _a = None class A : def __init__( self , snake_case_ ) -> None: _a = tree def __lowerCAmelCase ( self , snake_case_ ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
691
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __snake_case : Optional[Any] = 12_8022 __snake_case : List[str] = 12_8028 @require_sentencepiece class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = MaMaaaTokenizer __UpperCAmelCase : int = False __UpperCAmelCase : str = False __UpperCAmelCase : Tuple = True def __lowerCAmelCase ( self ) -> Any: super().setUp() _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] _a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) _a = Path(self.tmpdirname ) save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) _a = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **snake_case_ ) -> str: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Tuple: return ( "This is a test", "This is a test", ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "</s>" _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_tokenizer() _a = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def __lowerCAmelCase ( self ) -> Any: pass def __lowerCAmelCase ( self ) -> Dict: _a = self.get_tokenizer() _a = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , ) _a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) _a = tokenizer.convert_tokens_to_string(snake_case_ ) self.assertEqual(snake_case_ , "This is a test" ) @slow def __lowerCAmelCase ( self ) -> List[Any]: # fmt: off _a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): __UpperCAmelCase : Any = """facebook/m2m100_418M""" __UpperCAmelCase : Dict = [ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] __UpperCAmelCase : Optional[Any] = [ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off __UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def __lowerCAmelCase ( cls ) -> int: _a = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) _a = 1 return cls def __lowerCAmelCase ( self ) -> Any: self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.tokenizer.get_vocab() self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[str]: _a = "en" _a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: self.assertIn(snake_case_ , self.tokenizer.all_special_ids ) # fmt: off _a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on _a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) _a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertNotIn(self.tokenizer.eos_token , snake_case_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = tempfile.mkdtemp() _a = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(snake_case_ ) _a = MaMaaaTokenizer.from_pretrained(snake_case_ ) self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ ) @require_torch def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "en" _a = "fr" _a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" ) _a = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: _a = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) _a = "zh" 
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __lowerCAmelCase ( self ) -> List[Any]: _a = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _a = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __lowerCAmelCase ( self ) -> int: _a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(snake_case_ ) , { # en_XX, A, test, EOS "input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 1_2_8_0_0_6, } , )
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case : Tuple = { "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Optional[Any] = [ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __snake_case : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
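# Usage sketch for the lazy-import structure above: the torch-backed classes are only
# materialised on first access. This assumes the module mirrors the upstream
# `transformers.models.informer` package (an assumption, since names here may differ):
from transformers import InformerConfig

informer_config = InformerConfig(prediction_length=24)
print(informer_config.model_type)  # "informer"; model classes such as InformerModel load lazily on first use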
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case : Tuple = logging.get_logger(__name__) __snake_case : int = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class A ( a ): __UpperCAmelCase : Union[str, Any] = """wav2vec2""" def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]: super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # adapter _a = add_adapter _a = adapter_kernel_size _a = adapter_stride _a = num_adapter_layers _a = output_hidden_size or hidden_size _a = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _a = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = xvector_output_dim @property def __lowerCAmelCase ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
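# Minimal sketch of how this configuration is typically used; it appears to mirror
# `transformers.Wav2Vec2Config` (treated as an assumption here). The final property above
# multiplies the convolutional strides together, giving the number of raw audio samples
# that map onto a single encoder frame.
from transformers import Wav2Vec2Config

wav2vec2_config = Wav2Vec2Config()             # default strides (5, 2, 2, 2, 2, 2, 2)
print(wav2vec2_config.inputs_to_logits_ratio)  # 5 * 2**6 == 320 samples (~20 ms at 16 kHz)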
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCAmelCase ( self ) -> List[Any]: _a = 1 _a = 3 _a = (3_2, 3_2) _a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case_ ) return image @property def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _a = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) return model @property def __lowerCAmelCase ( self ) -> str: torch.manual_seed(0 ) _a = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def __lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _a = RobertaSeriesConfig( hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , ) return RobertaSeriesModelWithTransformation(snake_case_ ) @property def __lowerCAmelCase ( self ) -> Optional[int]: def extract(*snake_case_ , **snake_case_ ): class A : def __init__( self ) -> Tuple: _a = torch.ones([0] ) def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]: self.pixel_values.to(snake_case_ ) return self return Out() return extract def __lowerCAmelCase ( self ) -> Dict: _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.dummy_cond_unet _a = PNDMScheduler(skip_prk_steps=snake_case_ ) _a = self.dummy_vae _a = self.dummy_text_encoder _a = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) _a = 7_7 _a = self.dummy_image.to(snake_case_ ) _a = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk _a = AltDiffusionImgaImgPipeline( unet=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , safety_checker=snake_case_ , feature_extractor=self.dummy_extractor , ) _a = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case_ ) _a = alt_pipe.to(snake_case_ ) alt_pipe.set_progress_bar_config(disable=snake_case_ ) _a = "A painting of a squirrel eating a burger" _a = torch.Generator(device=snake_case_ ).manual_seed(0 ) _a = alt_pipe( [prompt] , generator=snake_case_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=snake_case_ , ) _a = output.images _a = torch.Generator(device=snake_case_ ).manual_seed(0 ) _a = alt_pipe( [prompt] , generator=snake_case_ , guidance_scale=6.0 , num_inference_steps=2 , 
output_type="np" , image=snake_case_ , return_dict=snake_case_ , )[0] _a = image[0, -3:, -3:, -1] _a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _a = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.dummy_cond_unet _a = PNDMScheduler(skip_prk_steps=snake_case_ ) _a = self.dummy_vae _a = self.dummy_text_encoder _a = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) _a = 7_7 _a = self.dummy_image.to(snake_case_ ) # put models in fp16 _a = unet.half() _a = vae.half() _a = bert.half() # make sure here that pndm scheduler skips prk _a = AltDiffusionImgaImgPipeline( unet=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , safety_checker=snake_case_ , feature_extractor=self.dummy_extractor , ) _a = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case_ ) _a = alt_pipe.to(snake_case_ ) alt_pipe.set_progress_bar_config(disable=snake_case_ ) _a = "A painting of a squirrel eating a burger" _a = torch.manual_seed(0 ) _a = alt_pipe( [prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="np" , image=snake_case_ , ).images assert image.shape == (1, 3_2, 3_2, 3) @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 _a = init_image.resize((7_6_0, 5_0_4) ) _a = "BAAI/AltDiffusion" _a = AltDiffusionImgaImgPipeline.from_pretrained( snake_case_ , safety_checker=snake_case_ , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _a = "A fantasy landscape, trending on artstation" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , generator=snake_case_ , output_type="np" , ) _a = output.images[0] _a = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 7_6_0, 3) _a = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> List[str]: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _a = init_image.resize((7_6_8, 5_1_2) ) _a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" ) _a = "BAAI/AltDiffusion" _a = AltDiffusionImgaImgPipeline.from_pretrained( snake_case_ , safety_checker=snake_case_ , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _a = "A fantasy landscape, trending on artstation" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ 
, strength=0.75 , guidance_scale=7.5 , generator=snake_case_ , output_type="np" , ) _a = output.images[0] assert image.shape == (5_1_2, 7_6_8, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
'''simple docstring''' def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return number | (1 << position) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return number & ~(1 << position) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return number ^ (1 << position) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return ((number >> position) & 1) == 1 def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
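# Note: every helper above is defined under the same name `_lowercase`, so each definition
# shadows the previous one at import time. Below is a sketch with distinct, descriptive
# names (the names are assumptions, not taken from the source), plus a few quick checks:
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)  # force the bit at `position` to 1


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)  # force the bit at `position` to 0


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)  # toggle the bit at `position`


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1  # True if the bit at `position` is 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)  # 1 if the bit is set, else 0


assert set_bit(0b1000, 1) == 0b1010
assert clear_bit(0b1010, 1) == 0b1000
assert flip_bit(0b1010, 1) == 0b1000
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0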
'''simple docstring''' import math import qiskit def _lowercase ( lowerCamelCase__ : int = 1, lowerCamelCase__ : int = 1, lowerCamelCase__ : int = 1 ): if ( isinstance(lowerCamelCase__, lowerCamelCase__ ) or isinstance(lowerCamelCase__, lowerCamelCase__ ) or isinstance(lowerCamelCase__, lowerCamelCase__ ) ): raise TypeError("inputs must be integers." ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError("inputs must be positive." ) if ( (math.floor(lowerCamelCase__ ) != input_a) or (math.floor(lowerCamelCase__ ) != input_a) or (math.floor(lowerCamelCase__ ) != carry_in) ): raise ValueError("inputs must be exact integers." ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError("inputs must be less or equal to 2." ) # build registers _a = qiskit.QuantumRegister(4, "qr" ) _a = qiskit.ClassicalRegister(2, "cr" ) # list the entries _a = [input_a, input_a, carry_in] _a = qiskit.QuantumCircuit(lowerCamelCase__, lowerCamelCase__ ) for i in range(0, 3 ): if entry[i] == 2: quantum_circuit.h(lowerCamelCase__ ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(lowerCamelCase__ ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(lowerCamelCase__ ) # for 0 entries # build the circuit quantum_circuit.ccx(0, 1, 3 ) # ccx = toffoli gate quantum_circuit.cx(0, 1 ) quantum_circuit.ccx(1, 2, 3 ) quantum_circuit.cx(1, 2 ) quantum_circuit.cx(0, 1 ) quantum_circuit.measure([2, 3], lowerCamelCase__ ) # measure the last two qbits _a = qiskit.Aer.get_backend("aer_simulator" ) _a = qiskit.execute(lowerCamelCase__, lowerCamelCase__, shots=1_000 ) return job.result().get_counts(lowerCamelCase__ ) if __name__ == "__main__": print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __snake_case : List[Any] = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ): _a = True while ask_again: _a = input(lowerCamelCase__ ) try: if default is not None and len(lowerCamelCase__ ) == 0: return default return convert_value(lowerCamelCase__ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ): _a = BulletMenu(lowerCamelCase__, lowerCamelCase__ ) _a = menu.run(default_choice=lowerCamelCase__ ) return convert_value(lowerCamelCase__ ) if convert_value is not None else result def _lowercase ( lowerCamelCase__ : str ): _a = int(lowerCamelCase__ ) return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] ) def _lowercase ( lowerCamelCase__ : str ): _a = int(lowerCamelCase__ ) return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] ) def _lowercase ( lowerCamelCase__ : Dict ): _a = int(lowerCamelCase__ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def _lowercase ( lowerCamelCase__ : List[Any] ): _a = int(lowerCamelCase__ ) return PrecisionType(["no", "fp16", "bf16", "fp8"][value] ) def _lowercase ( lowerCamelCase__ : str ): _a = int(lowerCamelCase__ ) return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] ) def _lowercase ( lowerCamelCase__ : str ): return {"yes": True, "no": False}[value.lower()] class A ( argparse.RawDescriptionHelpFormatter ): def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int: _a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _a = usage.replace("<command> [<args>] " , "" ) return usage
'''simple docstring''' import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __snake_case : Optional[Any] = logging.get_logger(__name__) def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : int ): _a = set() _a = [] def parse_line(lowerCamelCase__ : Any ): for line in fp: if isinstance(lowerCamelCase__, lowerCamelCase__ ): _a = line.decode("UTF-8" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" " ): # process a single warning and move it to `selected_warnings`. if len(lowerCamelCase__ ) > 0: _a = "\n".join(lowerCamelCase__ ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(lowerCamelCase__ ) buffer.clear() continue else: _a = line.strip() buffer.append(lowerCamelCase__ ) if from_gh: for filename in os.listdir(lowerCamelCase__ ): _a = os.path.join(lowerCamelCase__, lowerCamelCase__ ) if not os.path.isdir(lowerCamelCase__ ): # read the file if filename != "warnings.txt": continue with open(lowerCamelCase__ ) as fp: parse_line(lowerCamelCase__ ) else: try: with zipfile.ZipFile(lowerCamelCase__ ) as z: for filename in z.namelist(): if not os.path.isdir(lowerCamelCase__ ): # read the file if filename != "warnings.txt": continue with z.open(lowerCamelCase__ ) as fp: parse_line(lowerCamelCase__ ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any] ): _a = set() _a = [os.path.join(lowerCamelCase__, lowerCamelCase__ ) for p in os.listdir(lowerCamelCase__ ) if (p.endswith(".zip" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase__, lowerCamelCase__ ) ) return selected_warnings if __name__ == "__main__": def _lowercase ( lowerCamelCase__ : Union[str, Any] ): return values.split("," ) __snake_case : str = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) __snake_case : str = parser.parse_args() __snake_case : int = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __snake_case : str = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to 
GitHub time.sleep(1) # extract warnings from artifacts __snake_case : Tuple = extract_warnings(args.output_dir, args.targets) __snake_case : Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
'''simple docstring''' def _lowercase ( lowerCamelCase__ : list[list] ): _a = current_set.copy() for row_index, row in enumerate(lowerCamelCase__ ): _a = row[0] for column_index, column in enumerate(lowerCamelCase__ ): if magnitude == 0: _a = column continue _a = column / magnitude # Subtract to cancel term _a = current_set[0] _a = [first_row] _a = current_set[1::] for row in current_set: _a = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowerCamelCase__ ) continue for column_index in range(len(lowerCamelCase__ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowerCamelCase__ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _a = final_set[0] _a = [] _a = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _a = simplify(lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, lowerCamelCase__ ) _a = resultant return final_set def _lowercase ( lowerCamelCase__ : list[list] ): if len(lowerCamelCase__ ) == 0: raise IndexError("solve_simultaneous() requires n lists of length n+1" ) _a = len(lowerCamelCase__ ) + 1 if any(len(lowerCamelCase__ ) != _length for item in equations ): raise IndexError("solve_simultaneous() requires n lists of length n+1" ) for row in equations: if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ): raise ValueError("solve_simultaneous() requires lists of integers" ) if len(lowerCamelCase__ ) == 1: return [equations[0][-1] / equations[0][0]] _a = equations.copy() if any(0 in row for row in data_set ): _a = data_set.copy() _a = [] for row_index, row in enumerate(lowerCamelCase__ ): if 0 not in row: _a = data_set.pop(lowerCamelCase__ ) break if not full_row: raise ValueError("solve_simultaneous() requires at least 1 full equation" ) data_set.insert(0, lowerCamelCase__ ) _a = data_set.copy() _a = simplify(lowerCamelCase__ ) _a = simplified[::-1] _a = [] for row in simplified: _a = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _a = row.copy()[: len(lowerCamelCase__ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowerCamelCase__ ) == 0: solutions.append(0 ) continue _a = temp_row[1::] _a = temp_row[::-1] for column_index, column in enumerate(lowerCamelCase__ ): current_solution -= column * solutions[column_index] solutions.append(lowerCamelCase__ ) _a = [] for item in solutions: final.append(float(round(lowerCamelCase__, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() __snake_case : Tuple = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class A ( unittest.TestCase ): def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]: return F'''gaussian_noise_s={seed}_shape={'_'.join([str(snake_case_ ) for s in shape] )}.npy''' def __lowerCAmelCase ( self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self , snake_case_=0 , snake_case_=(4, 4, 6_4, 6_4) , snake_case_=False ) -> int: _a = jnp.bfloataa if fpaa else jnp.floataa _a = jnp.array(load_hf_numpy(self.get_file_format(snake_case_ , snake_case_ ) ) , dtype=snake_case_ ) return image def __lowerCAmelCase ( self , snake_case_=False , snake_case_="CompVis/stable-diffusion-v1-4" ) -> List[str]: _a = jnp.bfloataa if fpaa else jnp.floataa _a = "bf16" if fpaa else None _a , _a = FlaxUNetaDConditionModel.from_pretrained( snake_case_ , subfolder="unet" , dtype=snake_case_ , revision=snake_case_ ) return model, params def __lowerCAmelCase ( self , snake_case_=0 , snake_case_=(4, 7_7, 7_6_8) , snake_case_=False ) -> Tuple: _a = jnp.bfloataa if fpaa else jnp.floataa _a = jnp.array(load_hf_numpy(self.get_file_format(snake_case_ , snake_case_ ) ) , dtype=snake_case_ ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [1_7, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1_0_0_0, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]: _a , _a = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case_ ) _a = self.get_latents(snake_case_ , fpaa=snake_case_ ) _a = self.get_encoder_hidden_states(snake_case_ , fpaa=snake_case_ ) _a = model.apply( {"params": params} , snake_case_ , jnp.array(snake_case_ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case_ , ).sample assert sample.shape == latents.shape _a = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _a = jnp.array(snake_case_ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(snake_case_ , snake_case_ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [1_7, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1_0_0_0, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]: _a , _a = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case_ ) _a = self.get_latents(snake_case_ , shape=(4, 4, 9_6, 9_6) , fpaa=snake_case_ ) _a = self.get_encoder_hidden_states(snake_case_ , shape=(4, 7_7, 1_0_2_4) , fpaa=snake_case_ ) _a = model.apply( {"params": params} , snake_case_ , 
jnp.array(snake_case_ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case_ , ).sample assert sample.shape == latents.shape _a = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _a = jnp.array(snake_case_ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(snake_case_ , snake_case_ , atol=1E-2 )
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing return x.sum() def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing return i + 1 @dataclass class A : __UpperCAmelCase : int __UpperCAmelCase : str class A ( a ): def __lowerCAmelCase ( self ) -> Tuple: _a = {} _a = [] _a = 1 _a = [1, 2] _a = {"a": 1, "b": 2} _a = {"a": [1, 2], "b": [3, 4]} _a = {"a": {"1": 1}, "b": 2} _a = {"a": 1, "b": 2, "c": 3, "d": 4} _a = {} _a = [] _a = 2 _a = [2, 3] _a = {"a": 2, "b": 3} _a = {"a": [2, 3], "b": [4, 5]} _a = {"a": {"1": 2}, "b": 3} _a = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) _a = 2 self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) _a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} _a = {"a": 2, "b": 0, "c": 2} _a = { "a": np.eye(2 ).astype(snake_case_ ), "b": np.zeros(3 ).astype(snake_case_ ), "c": np.ones(2 ).astype(snake_case_ ), } self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(snake_case_ ): # can't pickle a local lambda map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ ) def __lowerCAmelCase ( self ) -> Any: _a = {"a": 1, "b": 2} _a = {"a": 3, "b": 4} _a = {"a": 5, "b": 6} _a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(snake_case_ , 
snake_case_ , snake_case_ ) ) , snake_case_ ) def __lowerCAmelCase ( self ) -> str: class A : __UpperCAmelCase : Optional[int] = """bar""" _a = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(snake_case_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc", [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ): with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: _a = {F'''{i}''': i for i in range(lowerCamelCase__ )} _a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A ( a ): @require_tf def __lowerCAmelCase ( self ) -> Any: import tensorflow as tf from tensorflow.keras import layers _a = layers.Dense(2 ) def gen_random_output(): _a = tf.random.uniform((1, 3) ) return model(snake_case_ ).numpy() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: import torch def gen_random_output(): _a = torch.nn.Linear(3 , 2 ) _a = torch.rand(1 , 3 ) return model(snake_case_ ).detach().numpy() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __lowerCAmelCase ( self ) -> Optional[int]: def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(4_2 ): _a = gen_random_output() with temp_seed(4_2 ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data", [{}] ) def _lowercase ( lowerCamelCase__ : Any ): _a = NestedDataStructure(lowerCamelCase__ ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output", [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ], ) def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ): _a = 
NestedDataStructure(lowerCamelCase__ ).flatten() assert output == expected_output def _lowercase ( ): _a = A(x=1, y="foobar" ) _a = {"x": 1, "y": "foobar"} assert asdict(lowerCamelCase__ ) == expected_output _a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]} _a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(lowerCamelCase__ ) == expected_output with pytest.raises(lowerCamelCase__ ): asdict([1, A(x=10, y="foo" )] ) def _lowercase ( lowerCamelCase__ : str ): return text.split() def _lowercase ( lowerCamelCase__ : List[Any] ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _lowercase ( ): with Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _a = [] for yield_time, content in iflatmap_unordered( lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(lowerCamelCase__ ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(lowerCamelCase__ ) == 4
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(a ) class A ( a ): __UpperCAmelCase : Dict = """rag""" __UpperCAmelCase : Dict = True def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]: super().__init__( bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _a = kwargs.pop("question_encoder" ) _a = question_encoder_config.pop("model_type" ) _a = kwargs.pop("generator" ) _a = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = reduce_loss _a = label_smoothing _a = exclude_bos_score _a = do_marginalize _a = title_sep _a = doc_sep _a = n_docs _a = max_combined_length _a = dataset _a = dataset_split _a = index_name _a = retrieval_vector_size _a = retrieval_batch_size _a = passages_path _a = index_path _a = use_dummy_dataset _a = output_retrieved _a = do_deduplication _a = use_cache if self.forced_eos_token_id is None: _a = getattr(self.generator , "forced_eos_token_id" , snake_case_ ) @classmethod def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = copy.deepcopy(self.__dict__ ) _a = self.question_encoder.to_dict() _a = self.generator.to_dict() _a = self.__class__.model_type return output
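# Usage sketch for composing a RAG configuration from its two sub-configurations.
# This assumes the class above corresponds to `transformers.RagConfig` and that the
# classmethod defined above is the upstream `from_question_encoder_generator_configs`
# (both names are assumptions, since identifiers in this file are obfuscated).
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)
print(rag_config.model_type)  # "rag"
print(rag_config.n_docs)      # 5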
691
1
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __snake_case : Dict = logging.get_logger(__name__) class A ( a ): __UpperCAmelCase : Optional[Any] = ["""audio_values""", """audio_mask"""] def __init__( self , snake_case_=2_0_4_8 , snake_case_=1 , snake_case_=[1_6, 1_6] , snake_case_=1_2_8 , snake_case_=4_4_1_0_0 , snake_case_=8_6 , snake_case_=2_0_4_8 , snake_case_=0.0 , **snake_case_ , ) -> List[Any]: super().__init__( feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , **snake_case_ , ) _a = spectrogram_length _a = num_channels _a = patch_size _a = feature_size // self.patch_size[1] _a = n_fft _a = sampling_rate // hop_length_to_sampling_rate _a = sampling_rate _a = padding_value _a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=snake_case_ , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=snake_case_ , norm="slaney" , mel_scale="slaney" , ).T def __lowerCAmelCase ( self , snake_case_ ) -> np.ndarray: _a = spectrogram( snake_case_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , ) _a = log_spec[:, :-1] _a = log_spec - 20.0 _a = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = None , snake_case_ = False , snake_case_ = False , **snake_case_ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _a = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _a = is_batched_numpy or ( isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(snake_case_ , np.ndarray ): _a = np.asarray(snake_case_ , dtype=np.floataa ) elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _a = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _a = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , snake_case_ ): _a = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _a = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _a = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _a = np.array(snake_case_ ).astype(np.floataa ) # convert into correct format for padding _a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _a = np.ones([len(snake_case_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _a = padded_audio_features * self.padding_value for i in range(len(snake_case_ ) ): _a = audio_features[i] _a = feature # return as BatchFeature if return_attention_mask: _a = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: _a = {"audio_values": padded_audio_features} _a = BatchFeature(data=snake_case_ , tensor_type=snake_case_ ) return encoded_inputs
691
'''simple docstring''' class A : def __init__( self ) -> List[str]: _a = 0 _a = 0 _a = {} def __lowerCAmelCase ( self , snake_case_ ) -> int: if vertex not in self.adjacency: _a = {} self.num_vertices += 1 def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]: self.add_vertex(snake_case_ ) self.add_vertex(snake_case_ ) if head == tail: return _a = weight _a = weight def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.get_edges() for edge in edges: _a , _a , _a = edge edges.remove((tail, head, weight) ) for i in range(len(snake_case_ ) ): _a = list(edges[i] ) edges.sort(key=lambda snake_case_ : e[2] ) for i in range(len(snake_case_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: _a = edges[i][2] + 1 for edge in edges: _a , _a , _a = edge _a = weight _a = weight def __str__( self ) -> Optional[int]: _a = "" for tail in self.adjacency: for head in self.adjacency[tail]: _a = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __lowerCAmelCase ( self ) -> Any: return self.adjacency.keys() @staticmethod def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any: _a = Graph() if vertices is None: _a = [] if edges is None: _a = [] for vertex in vertices: g.add_vertex(snake_case_ ) for edge in edges: g.add_edge(*snake_case_ ) return g class A : def __init__( self ) -> Optional[int]: _a = {} _a = {} def __len__( self ) -> List[Any]: return len(self.parent ) def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: if item in self.parent: return self.find(snake_case_ ) _a = item _a = 0 return item def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]: if item not in self.parent: return self.make_set(snake_case_ ) if item != self.parent[item]: _a = self.find(self.parent[item] ) return self.parent[item] def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]: _a = self.find(snake_case_ ) _a = self.find(snake_case_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _a = roota return roota if self.rank[roota] < self.rank[roota]: _a = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _a = roota return roota return None @staticmethod def __lowerCAmelCase ( snake_case_ ) -> Tuple: _a = graph.num_vertices _a = Graph.UnionFind() _a = [] while num_components > 1: _a = {} for vertex in graph.get_vertices(): _a = -1 _a = graph.get_edges() for edge in edges: _a , _a , _a = edge edges.remove((tail, head, weight) ) for edge in edges: _a , _a , _a = edge _a = union_find.find(snake_case_ ) _a = union_find.find(snake_case_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _a = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _a = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _a , _a , _a = cheap_edge[vertex] if union_find.find(snake_case_ ) != union_find.find(snake_case_ ): union_find.union(snake_case_ , snake_case_ ) mst_edges.append(cheap_edge[vertex] ) _a = num_components - 1 _a = Graph.build(edges=snake_case_ ) return mst
691
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> Dict: torch.manual_seed(0 ) _a = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def __lowerCAmelCase ( self ) -> Any: _a = self.dummy_uncond_unet _a = PNDMScheduler() _a = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_ ) pndm.to(snake_case_ ) pndm.set_progress_bar_config(disable=snake_case_ ) _a = torch.manual_seed(0 ) _a = pndm(generator=snake_case_ , num_inference_steps=2_0 , output_type="numpy" ).images _a = torch.manual_seed(0 ) _a = pndm(generator=snake_case_ , num_inference_steps=2_0 , output_type="numpy" , return_dict=snake_case_ )[0] _a = image[0, -3:, -3:, -1] _a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _a = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[Any]: _a = "google/ddpm-cifar10-32" _a = UNetaDModel.from_pretrained(snake_case_ ) _a = PNDMScheduler() _a = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_ ) pndm.to(snake_case_ ) pndm.set_progress_bar_config(disable=snake_case_ ) _a = torch.manual_seed(0 ) _a = pndm(generator=snake_case_ , output_type="numpy" ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _a = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
691
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __snake_case : Tuple = "\\n Text data.\n Second line of data." __snake_case : int = "file" @pytest.fixture(scope="session" ) def _lowercase ( lowerCamelCase__ : Optional[Any] ): _a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") _a = bytes(lowerCamelCase__, "utf-8" ) with zstd.open(lowerCamelCase__, "wb" ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture def _lowercase ( lowerCamelCase__ : int ): with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f: f.write(lowerCamelCase__ ) return FILE_PATH @pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] ) def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ): _a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} _a = input_paths[compression_format] _a = tmp_path / "cache" _a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ ) _a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ ) with open(lowerCamelCase__ ) as f: _a = f.read() with open(lowerCamelCase__ ) as f: _a = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted", [True, False] ) @pytest.mark.parametrize("default_cache_dir", [True, False] ) def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ): _a = "custom_cache" _a = "custom_extracted_dir" _a = tmp_path / "custom_extracted_path" if default_extracted: _a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) ) _a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _a = xz_file _a = ( DownloadConfig(extract_compressed_file=lowerCamelCase__ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ ) ) _a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ ) assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected def _lowercase ( lowerCamelCase__ : Union[str, Any] ): # absolute path _a = str(Path(lowerCamelCase__ ).resolve() ) assert cached_path(lowerCamelCase__ ) == text_file # relative path _a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(lowerCamelCase__ ) == text_file def _lowercase ( lowerCamelCase__ : Dict ): # absolute path _a = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(lowerCamelCase__ ): cached_path(lowerCamelCase__ ) # relative path _a = "./__missing_file__.txt" with pytest.raises(lowerCamelCase__ ): cached_path(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = get_from_cache(F'''tmp://{tmpfs_file}''' ) with open(lowerCamelCase__ ) as f: _a = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE", 
lowerCamelCase__ ) def _lowercase ( ): with pytest.raises(lowerCamelCase__ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): http_get("https://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Optional[Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): fsspec_head("s3://huggingface.co" )
691
1
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __snake_case : Union[str, Any] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def _lowercase ( lowerCamelCase__ : List[Any] ): _a = {} state_dict.pop("pixel_mean", lowerCamelCase__ ) state_dict.pop("pixel_std", lowerCamelCase__ ) _a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _a = key.replace(lowerCamelCase__, lowerCamelCase__ ) if re.match(lowerCamelCase__, lowerCamelCase__ ): _a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) ) if layer_nb == 0: _a = key.replace("layers.0", "proj_in" ) elif layer_nb == 1: _a = key.replace("layers.1", "layers.0" ) elif layer_nb == 2: _a = key.replace("layers.2", "proj_out" ) _a = value _a = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ): _a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: _a = SamConfig() elif "sam_vit_l" in model_name: _a = SamVisionConfig( hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) _a = SamConfig( vision_config=lowerCamelCase__, ) elif "sam_vit_h" in model_name: _a = SamVisionConfig( hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) _a = SamConfig( vision_config=lowerCamelCase__, ) _a = torch.load(lowerCamelCase__, map_location="cpu" ) _a = replace_keys(lowerCamelCase__ ) _a = SamImageProcessor() _a = SamProcessor(image_processor=lowerCamelCase__ ) _a = SamModel(lowerCamelCase__ ) hf_model.load_state_dict(lowerCamelCase__ ) _a = hf_model.to("cuda" ) _a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" _a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" ) _a = [[[400, 650]]] _a = [[1]] _a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 _a = processor( 
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 _a = ((75, 275, 1_725, 850),) _a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. _a = [[[400, 650], [800, 650]]] _a = [[1, 1]] _a = processor( images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": __snake_case : Union[str, Any] = argparse.ArgumentParser() __snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) __snake_case : str = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
691
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __snake_case : Union[str, Any] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def _lowercase ( lowerCamelCase__ : List[Any] ): _a = {} state_dict.pop("pixel_mean", lowerCamelCase__ ) state_dict.pop("pixel_std", lowerCamelCase__ ) _a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _a = key.replace(lowerCamelCase__, lowerCamelCase__ ) if re.match(lowerCamelCase__, lowerCamelCase__ ): _a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) ) if layer_nb == 0: _a = key.replace("layers.0", "proj_in" ) elif layer_nb == 1: _a = key.replace("layers.1", "layers.0" ) elif layer_nb == 2: _a = key.replace("layers.2", "proj_out" ) _a = value _a = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ): _a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: _a = SamConfig() elif "sam_vit_l" in model_name: _a = SamVisionConfig( hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) _a = SamConfig( vision_config=lowerCamelCase__, ) elif "sam_vit_h" in model_name: _a = SamVisionConfig( hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) _a = SamConfig( vision_config=lowerCamelCase__, ) _a = torch.load(lowerCamelCase__, map_location="cpu" ) _a = replace_keys(lowerCamelCase__ ) _a = SamImageProcessor() _a = SamProcessor(image_processor=lowerCamelCase__ ) _a = SamModel(lowerCamelCase__ ) hf_model.load_state_dict(lowerCamelCase__ ) _a = hf_model.to("cuda" ) _a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" _a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" ) _a = [[[400, 650]]] _a = [[1]] _a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 _a = processor( 
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 _a = ((75, 275, 1_725, 850),) _a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. _a = [[[400, 650], [800, 650]]] _a = [[1, 1]] _a = processor( images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": __snake_case : Union[str, Any] = argparse.ArgumentParser() __snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) __snake_case : str = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
691
1
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _lowercase ( ): _a = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png" _a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" ) return image def _lowercase ( lowerCamelCase__ : Optional[int] ): _a = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") ) # fmt: on return rename_keys def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict ): _a = dct.pop(lowerCamelCase__ ) _a = val def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int] ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _a = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) _a = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # 
next, set bias in the state dict _a = torch.cat((q_bias, torch.zeros_like(lowerCamelCase__, requires_grad=lowerCamelCase__ ), v_bias) ) _a = qkv_bias def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any] ): _a = 364 if "coco" in model_name else 224 _a = BlipaVisionConfig(image_size=lowerCamelCase__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _a = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=lowerCamelCase__ ).to_dict() elif "opt-6.7b" in model_name: _a = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=lowerCamelCase__ ).to_dict() elif "t5-xl" in model_name: _a = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _a = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1 ).to_dict() _a = BlipaConfig(vision_config=lowerCamelCase__, text_config=lowerCamelCase__ ) return config, image_size @torch.no_grad() def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Dict=None, lowerCamelCase__ : List[Any]=False ): _a = ( AutoTokenizer.from_pretrained("facebook/opt-2.7b" ) if "opt" in model_name else AutoTokenizer.from_pretrained("google/flan-t5-xl" ) ) _a = tokenizer("\n", add_special_tokens=lowerCamelCase__ ).input_ids[0] _a , _a = get_blipa_config(lowerCamelCase__, eos_token_id=lowerCamelCase__ ) _a = BlipaForConditionalGeneration(lowerCamelCase__ ).eval() _a = { "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"), "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"), "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"), "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"), "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"), "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"), "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"), } _a , _a = model_name_to_original[model_name] # load original model print("Loading original model..." ) _a = "cuda" if torch.cuda.is_available() else "cpu" _a , _a , _a = load_model_and_preprocess( name=lowerCamelCase__, model_type=lowerCamelCase__, is_eval=lowerCamelCase__, device=lowerCamelCase__ ) original_model.eval() print("Done!" 
) # update state dict keys _a = original_model.state_dict() _a = create_rename_keys(lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _a = state_dict.pop(lowerCamelCase__ ) if key.startswith("Qformer.bert" ): _a = key.replace("Qformer.bert", "qformer" ) if "attention.self" in key: _a = key.replace("self", "attention" ) if "opt_proj" in key: _a = key.replace("opt_proj", "language_projection" ) if "t5_proj" in key: _a = key.replace("t5_proj", "language_projection" ) if key.startswith("opt" ): _a = key.replace("opt", "language" ) if key.startswith("t5" ): _a = key.replace("t5", "language" ) _a = val # read in qv biases read_in_q_v_bias(lowerCamelCase__, lowerCamelCase__ ) _a , _a = hf_model.load_state_dict(lowerCamelCase__, strict=lowerCamelCase__ ) assert len(lowerCamelCase__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _a = load_demo_image() _a = vis_processors["eval"](lowerCamelCase__ ).unsqueeze(0 ).to(lowerCamelCase__ ) _a = tokenizer(["\n"], return_tensors="pt" ).input_ids.to(lowerCamelCase__ ) # create processor _a = BlipImageProcessor( size={"height": image_size, "width": image_size}, image_mean=lowerCamelCase__, image_std=lowerCamelCase__ ) _a = BlipaProcessor(image_processor=lowerCamelCase__, tokenizer=lowerCamelCase__ ) _a = processor(images=lowerCamelCase__, return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # make sure processor creates exact same pixel values assert torch.allclose(lowerCamelCase__, lowerCamelCase__ ) original_model.to(lowerCamelCase__ ) hf_model.to(lowerCamelCase__ ) with torch.no_grad(): if "opt" in model_name: _a = original_model({"image": original_pixel_values, "text_input": [""]} ).logits _a = hf_model(lowerCamelCase__, lowerCamelCase__ ).logits else: _a = original_model( {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits _a = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 ) _a = hf_model(lowerCamelCase__, lowerCamelCase__, labels=lowerCamelCase__ ).logits assert original_logits.shape == logits.shape print("First values of original logits:", original_logits[0, :3, :3] ) print("First values of HF logits:", logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _a = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]], device=lowerCamelCase__ ) assert torch.allclose(logits[0, :3, :3], lowerCamelCase__, atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _a = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]], device=lowerCamelCase__ ) else: # cast to same type _a = logits.dtype assert torch.allclose(original_logits.to(lowerCamelCase__ ), lowerCamelCase__, atol=1e-2 ) print("Looks ok!" ) print("Generating a caption..." 
) _a = "" _a = tokenizer(lowerCamelCase__, return_tensors="pt" ).input_ids.to(lowerCamelCase__ ) _a = original_model.generate({"image": original_pixel_values} ) _a = hf_model.generate( lowerCamelCase__, lowerCamelCase__, do_sample=lowerCamelCase__, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, ) print("Original generation:", lowerCamelCase__ ) _a = input_ids.shape[1] _a = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=lowerCamelCase__ ) _a = [text.strip() for text in output_text] print("HF generation:", lowerCamelCase__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(lowerCamelCase__ ) hf_model.save_pretrained(lowerCamelCase__ ) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''' ) hf_model.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": __snake_case : Union[str, Any] = argparse.ArgumentParser() __snake_case : Tuple = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __snake_case : Tuple = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
691
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase__ : List[Any] ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _a = [] for i in range(lowerCamelCase__ ): _a = i / num_diffusion_timesteps _a = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) ) return torch.tensor(lowerCamelCase__, dtype=torch.floataa ) class A ( a , a ): __UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers] __UpperCAmelCase : Optional[int] = 2 @register_to_config def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]: if trained_betas is not None: _a = torch.tensor(snake_case_ , dtype=torch.floataa ) elif beta_schedule == "linear": _a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _a = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _a = betas_for_alpha_bar(snake_case_ ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _a = 1.0 - self.betas _a = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(snake_case_ , snake_case_ , snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict: if schedule_timesteps is None: _a = self.timesteps _a = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _a = 1 if len(snake_case_ ) > 1 else 0 else: _a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep _a = self._index_counter[timestep_int] return indices[pos].item() @property def __lowerCAmelCase ( self ) -> Dict: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor: _a = self.index_for_timestep(snake_case_ ) if self.state_in_first_order: _a = self.sigmas[step_index] else: _a = self.sigmas_interpol[step_index] _a = sample / ((sigma**2 + 1) ** 0.5) return sample def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]: _a = num_inference_steps _a = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _a = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _a = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) _a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ ) _a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ ) _a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _a = torch.from_numpy(snake_case_ ).to(device=snake_case_ ) # interpolate sigmas _a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() _a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _a = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(snake_case_ ).startswith("mps" ): # mps does not support float64 _a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa ) else: _a = torch.from_numpy(snake_case_ ).to(snake_case_ ) # interpolate timesteps _a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype ) _a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() _a = torch.cat([timesteps[:1], interleaved_timesteps] ) _a = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _a = defaultdict(snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: # get log sigma _a = sigma.log() # get distribution _a = log_sigma - self.log_sigmas[:, None] # get sigmas range _a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _a = low_idx + 1 _a = self.log_sigmas[low_idx] _a = self.log_sigmas[high_idx] # interpolate sigmas _a = (low - log_sigma) / (low - high) _a = w.clamp(0 , 1 ) # transform interpolation to time range _a = (1 - w) * low_idx + w * high_idx _a = t.view(sigma.shape ) return t @property def __lowerCAmelCase ( self ) -> List[Any]: return self.sample is None def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]: _a = self.index_for_timestep(snake_case_ ) # advance index counter by 1 _a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _a = self.sigmas[step_index] _a = self.sigmas_interpol[step_index + 1] _a = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _a = self.sigmas[step_index - 1] _a = self.sigmas_interpol[step_index] _a = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _a = 0 _a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _a = sigma_hat if self.state_in_first_order else sigma_interpol _a = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _a = sigma_hat if self.state_in_first_order else sigma_interpol _a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("prediction_type not implemented yet: sample" ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order _a = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _a = sigma_interpol - sigma_hat # store for 2nd order step _a = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _a = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _a = sigma_next - sigma_hat _a = self.sample _a = None _a = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples _a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ): # mps does not support float64 _a = self.timesteps.to(original_samples.device , dtype=torch.floataa ) _a = timesteps.to(original_samples.device , dtype=torch.floataa ) else: _a = self.timesteps.to(original_samples.device ) _a = timesteps.to(original_samples.device ) _a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps] _a = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _a = sigma.unsqueeze(-1 ) _a = original_samples + noise * sigma return noisy_samples def __len__( self ) -> str: return self.config.num_train_timesteps
691
1
'''simple docstring''' from collections.abc import Generator from math import sin def _lowercase ( lowerCamelCase__ : bytes ): if len(lowerCamelCase__ ) != 32: raise ValueError("Input must be of length 32" ) _a = b"" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def _lowercase ( lowerCamelCase__ : int ): if i < 0: raise ValueError("Input must be non-negative" ) _a = format(lowerCamelCase__, "08x" )[-8:] _a = b"" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" ) return little_endian_hex def _lowercase ( lowerCamelCase__ : bytes ): _a = b"" for char in message: bit_string += format(lowerCamelCase__, "08b" ).encode("utf-8" ) _a = format(len(lowerCamelCase__ ), "064b" ).encode("utf-8" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(lowerCamelCase__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def _lowercase ( lowerCamelCase__ : bytes ): if len(lowerCamelCase__ ) % 512 != 0: raise ValueError("Input must have length that's a multiple of 512" ) for pos in range(0, len(lowerCamelCase__ ), 512 ): _a = bit_string[pos : pos + 512] _a = [] for i in range(0, 512, 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) ) yield block_words def _lowercase ( lowerCamelCase__ : int ): if i < 0: raise ValueError("Input must be non-negative" ) _a = format(lowerCamelCase__, "032b" ) _a = "" for c in i_str: new_str += "1" if c == "0" else "0" return int(lowerCamelCase__, 2 ) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return (a + b) % 2**32 def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): if i < 0: raise ValueError("Input must be non-negative" ) if shift < 0: raise ValueError("Shift must be non-negative" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def _lowercase ( lowerCamelCase__ : bytes ): _a = preprocess(lowerCamelCase__ ) _a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states _a = 0x67452301 _a = 0xefcdab89 _a = 0x98badcfe _a = 0x10325476 _a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(lowerCamelCase__ ): _a = aa _a = ba _a = ca _a = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _a = d ^ (b & (c ^ d)) _a = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _a = c ^ (d & (b ^ c)) _a = (5 * i + 1) % 16 elif i <= 47: _a = b ^ c ^ d _a = (3 * i + 5) % 16 else: _a = c ^ (b | not_aa(lowerCamelCase__ )) _a = (7 * i) % 16 _a = (f + a + added_consts[i] + block_words[g]) % 2**32 _a = d _a = c _a = b _a = sum_aa(lowerCamelCase__, left_rotate_aa(lowerCamelCase__, shift_amounts[i] ) ) # Add hashed chunk to running total _a = sum_aa(lowerCamelCase__, lowerCamelCase__ ) _a = sum_aa(lowerCamelCase__, lowerCamelCase__ ) _a = sum_aa(lowerCamelCase__, lowerCamelCase__ ) _a = sum_aa(lowerCamelCase__, lowerCamelCase__ ) _a = reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) + reformat_hex(lowerCamelCase__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
691
'''simple docstring''' def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ): return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase__ ) ) def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ): # Base Case if index == len(lowerCamelCase__ ): return True # Recursive Step for i in range(lowerCamelCase__ ): if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ): # Color current vertex _a = i # Validate coloring if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ): return True # Backtrack _a = -1 return False def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ): _a = [-1] * len(lowerCamelCase__ ) if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ): return colored_vertices return []
691
1
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def _lowercase ( lowerCamelCase__ : Tuple ): _a = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(lowerCamelCase__, lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Optional[int] ): _a = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: _a = s_dict.pop(lowerCamelCase__ ) elif "subsample" in key: _a = s_dict.pop(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : List[Any] ): _a , _a = emb.weight.shape _a = nn.Linear(lowerCamelCase__, lowerCamelCase__, bias=lowerCamelCase__ ) _a = emb.weight.data return lin_layer def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Union[str, Any] ): _a = torch.load(lowerCamelCase__, map_location="cpu" ) _a = mam_aaa["args"] _a = mam_aaa["model"] _a = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(lowerCamelCase__ ) rename_keys(lowerCamelCase__ ) _a = state_dict["decoder.embed_tokens.weight"].shape[0] _a = args.share_decoder_input_output_embed _a = [int(lowerCamelCase__ ) for i in args.conv_kernel_sizes.split("," )] _a = SpeechaTextConfig( vocab_size=lowerCamelCase__, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(lowerCamelCase__ ), conv_channels=args.conv_channels, conv_kernel_sizes=lowerCamelCase__, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=lowerCamelCase__, num_beams=5, max_length=200, use_cache=lowerCamelCase__, decoder_start_token_id=2, early_stopping=lowerCamelCase__, ) _a = SpeechaTextForConditionalGeneration(lowerCamelCase__ ) _a , _a = model.model.load_state_dict(lowerCamelCase__, strict=lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0 and not set(lowerCamelCase__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F''' but all the following weights are missing {missing}''' ) if tie_embeds: _a = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _a = lm_head_weights model.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") __snake_case : Dict = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
691
'''simple docstring''' import heapq as hq import math from collections.abc import Iterator class A : def __init__( self , snake_case_ ) -> Optional[int]: _a = str(id_ ) _a = None _a = None _a = [] _a = {} # {vertex:distance} def __lt__( self , snake_case_ ) -> Optional[Any]: return self.key < other.key def __repr__( self ) -> Union[str, Any]: return self.id def __lowerCAmelCase ( self , snake_case_ ) -> Tuple: self.neighbors.append(snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any: _a = weight def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ ) graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ): _a = [] for u in graph: _a = math.inf _a = None _a = 0 _a = graph[:] while q: _a = min(lowerCamelCase__ ) q.remove(lowerCamelCase__ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): _a = u _a = u.edges[v.id] for i in range(1, len(lowerCamelCase__ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ): for u in graph: _a = math.inf _a = None _a = 0 _a = list(lowerCamelCase__ ) hq.heapify(lowerCamelCase__ ) while h: _a = hq.heappop(lowerCamelCase__ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): _a = u _a = u.edges[v.id] hq.heapify(lowerCamelCase__ ) for i in range(1, len(lowerCamelCase__ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def _lowercase ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
691
1
'''simple docstring''' # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( "pipelines_utils", "0.22.0", "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.", standard_warn=False, stacklevel=3, )
691
'''simple docstring''' __snake_case : List[str] = "Tobias Carryer" from time import time class A : def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008 _a = multiplier _a = increment _a = modulo _a = seed def __lowerCAmelCase ( self ) -> str: _a = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. __snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31) while True: print(lcg.next_number())
691
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig class A ( a ): __UpperCAmelCase : List[str] = """bert-generation""" def __init__( self , snake_case_=5_0_3_5_8 , snake_case_=1_0_2_4 , snake_case_=2_4 , snake_case_=1_6 , snake_case_=4_0_9_6 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=0.02 , snake_case_=1E-1_2 , snake_case_=0 , snake_case_=2 , snake_case_=1 , snake_case_="absolute" , snake_case_=True , **snake_case_ , ) -> str: super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = initializer_range _a = layer_norm_eps _a = position_embedding_type _a = use_cache
691
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __snake_case : List[str] = logging.get_logger("transformers.models.encodec") __snake_case : Tuple = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } __snake_case : int = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", "encoder.model.15.conv.conv": "encoder.layers.15.conv", } __snake_case : Optional[int] = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } __snake_case : Tuple = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", 
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } __snake_case : int = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } __snake_case : Union[str, Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __snake_case : List[str] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __snake_case : Tuple = [] __snake_case : Optional[int] = [] def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ): for attribute in key.split("." ): _a = getattr(lowerCamelCase__, lowerCamelCase__ ) if weight_type is not None: _a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value elif weight_type == "weight_ih_l0": _a = value elif weight_type == "weight_hh_l0": _a = value elif weight_type == "bias_ih_l0": _a = value elif weight_type == "bias_hh_l0": _a = value elif weight_type == "weight_ih_l1": _a = value elif weight_type == "weight_hh_l1": _a = value elif weight_type == "bias_ih_l1": _a = value elif weight_type == "bias_hh_l1": _a = value else: _a = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ): _a = [] if model_name == "encodec_24khz" or "encodec_32khz": _a = MAPPING_24K elif model_name == "encodec_48khz": _a = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__, lowerCamelCase__ ): logger.info(F'''{name} was ignored''' ) continue _a = False for key, mapped_key in MAPPING.items(): if "*" in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: _a = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue _a = True if "*" in mapped_key: _a = name.split(lowerCamelCase__ )[0].split("." 
)[-2] _a = mapped_key.replace("*", lowerCamelCase__ ) if "weight_g" in name: _a = "weight_g" elif "weight_v" in name: _a = "weight_v" elif "weight_ih_l0" in name: _a = "weight_ih_l0" elif "weight_hh_l0" in name: _a = "weight_hh_l0" elif "bias_ih_l0" in name: _a = "bias_ih_l0" elif "bias_hh_l0" in name: _a = "bias_hh_l0" elif "weight_ih_l1" in name: _a = "weight_ih_l1" elif "weight_hh_l1" in name: _a = "weight_hh_l1" elif "bias_ih_l1" in name: _a = "bias_ih_l1" elif "bias_hh_l1" in name: _a = "bias_hh_l1" elif "bias" in name: _a = "bias" elif "weight" in name: _a = "weight" elif "running_mean" in name: _a = "running_mean" elif "running_var" in name: _a = "running_var" elif "num_batches_tracked" in name: _a = "num_batches_tracked" else: _a = None set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ): if config_path is not None: _a = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: _a = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": _a = [8, 5, 4, 4] _a = [2.2] _a = 64 _a = 32_000 _a = 2_048 _a = False _a = False _a = False elif model_name == "encodec_48khz": _a = [8, 5, 4, 2] _a = [3.0, 6.0, 12.0, 24.0] _a = 48_000 _a = 2 _a = False _a = "time_group_norm" _a = True _a = 1.0 _a = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) _a = EncodecModel(lowerCamelCase__ ) _a = EncodecFeatureExtractor( feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, ) feature_extractor.save_pretrained(lowerCamelCase__ ) _a = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights _a = original_checkpoint["best_state"] recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __snake_case : List[Any] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
691
1
'''simple docstring'''
from math import factorial


def _lowercase ( lowerCamelCase__ : int = 100 ):
    return sum(map(lowerCamelCase__, str(factorial(lowerCamelCase__ ) ) ) )


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
691
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case : int = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Union[str, Any] = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case : Dict = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
691
1
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __snake_case : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece.model") __snake_case : Dict = {"target_lang": "fi", "source_lang": "en"} __snake_case : Any = ">>zh<<" __snake_case : str = "Helsinki-NLP/" if is_torch_available(): __snake_case : Any = "pt" elif is_tf_available(): __snake_case : Dict = "tf" else: __snake_case : Dict = "jax" @require_sentencepiece class A ( a , unittest.TestCase ): __UpperCAmelCase : Dict = MarianTokenizer __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Any = True def __lowerCAmelCase ( self ) -> Optional[int]: super().setUp() _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] _a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) _a = Path(self.tmpdirname ) save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["target_spm"] ) _a = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **snake_case_ ) -> MarianTokenizer: return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Any: return ( "This is a test", "This is a test", ) def __lowerCAmelCase ( self ) -> str: _a = "</s>" _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(snake_case_ ) , 9 ) def __lowerCAmelCase ( self ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) _a = en_de_tokenizer(["I am a small frog"] , return_tensors=snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0] self.assertListEqual(snake_case_ , batch.input_ids[0] ) _a = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(snake_case_ ) _a = [x.name for x in Path(snake_case_ ).glob("*" )] self.assertIn("source.spm" , snake_case_ ) MarianTokenizer.from_pretrained(snake_case_ ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_tokenizer() _a = tok( ["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=snake_case_ , truncation=snake_case_ , return_tensors=snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2) ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = self.get_tokenizer() _a = tok(["I am a tiny frog", "I am a small frog"] , 
padding=snake_case_ , return_tensors=snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) ) @slow def __lowerCAmelCase ( self ) -> Dict: # fmt: off _a = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def __lowerCAmelCase ( self ) -> str: _a = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) _a = "Tämä on testi" _a = "This is a test" _a = [7_6, 7, 2_0_4_7, 2] _a = [6_9, 1_2, 1_1, 9_4_0, 2] _a = tokenizer(snake_case_ ).input_ids self.assertListEqual(snake_case_ , snake_case_ ) _a = tokenizer(text_target=snake_case_ ).input_ids self.assertListEqual(snake_case_ , snake_case_ ) _a = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , snake_case_ )
691
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=a ): __UpperCAmelCase : int = ["""torch""", """scipy"""] def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple: requires_backends(self , ["torch", "scipy"] ) @classmethod def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]: requires_backends(cls , ["torch", "scipy"] ) @classmethod def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any: requires_backends(cls , ["torch", "scipy"] )
691
1
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class A ( a ): def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = tempfile.mkdtemp() _a = 8 # DPR tok _a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _a = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) _a = os.path.join(snake_case_ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok _a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] _a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) _a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] _a = {"unk_token": "<unk>"} _a = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) _a = os.path.join(snake_case_ , BART_VOCAB_FILES_NAMES["vocab_file"] ) _a = os.path.join(snake_case_ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case_ ) ) def __lowerCAmelCase ( self ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def __lowerCAmelCase ( self ) -> DPRContextEncoderTokenizer: return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def __lowerCAmelCase ( self ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def __lowerCAmelCase ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __lowerCAmelCase ( self ) -> List[str]: _a = self.get_dummy_dataset() _a = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with 
patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: _a = dataset _a = RagRetriever( snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: _a = self.get_dummy_dataset() _a = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: _a = os.path.join(self.tmpdirname , "dataset" ) _a = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset _a = RagRetriever( snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: _a = RagRetriever( snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case_ ) , ) return retriever def __lowerCAmelCase ( self ) -> List[Any]: _a = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) _a = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) _a = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) _a = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case_ , open(snake_case_ , "wb" ) ) _a = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) _a = RagRetriever( snake_case_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __lowerCAmelCase ( self ) -> str: _a = 1 _a = self.get_dummy_canonical_hf_index_retriever() _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(snake_case_ , n_docs=snake_case_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __lowerCAmelCase ( self ) -> List[str]: _a = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: _a = self.get_dummy_dataset() retriever.save_pretrained(snake_case_ ) _a = RagRetriever.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = np.array( [np.ones(self.retrieval_vector_size ), 
-np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(snake_case_ , n_docs=1 ) self.assertTrue(out is not None ) def __lowerCAmelCase ( self ) -> List[Any]: _a = 1 _a = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(snake_case_ , n_docs=snake_case_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case_ ) _a = RagRetriever.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(snake_case_ , n_docs=1 ) self.assertTrue(out is not None ) def __lowerCAmelCase ( self ) -> Dict: _a = 1 _a = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(snake_case_ , n_docs=snake_case_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case_ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case_ ) _a = RagRetriever.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(snake_case_ , n_docs=1 ) self.assertTrue(out is not None ) def __lowerCAmelCase ( self ) -> Dict: _a = 1 _a = self.get_dummy_legacy_index_retriever() _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a , _a , _a = retriever.retrieve(snake_case_ , n_docs=snake_case_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case_ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def 
__lowerCAmelCase ( self ) -> List[str]: _a = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case_ ) _a = RagRetriever.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever.retrieve(snake_case_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __lowerCAmelCase ( self ) -> str: import torch _a = 1 _a = self.get_dummy_canonical_hf_index_retriever() _a = [[5, 7], [1_0, 1_1]] _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever(snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ ) _a , _a , _a = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case_ , snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) self.assertIsInstance(snake_case_ , np.ndarray ) _a = retriever( snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ , return_tensors="pt" , ) _a , _a , _a , _a = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case_ , torch.Tensor ) self.assertIsInstance(snake_case_ , torch.Tensor ) self.assertIsInstance(snake_case_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __lowerCAmelCase ( self ) -> Any: _a = self.get_dpr_ctx_encoder_tokenizer() _a = 1 _a = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ ) retriever.set_ctx_encoder_tokenizer(snake_case_ ) _a = [[5, 7], [1_0, 1_1]] _a = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) _a = retriever(snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ ) self.assertEqual( len(snake_case_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case_ ) # check for doc token related keys in dictionary.
691
'''simple docstring''' __snake_case : Dict = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
691
1
'''simple docstring'''
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class A :
    __UpperCAmelCase : float
    __UpperCAmelCase : TreeNode | None = None
    __UpperCAmelCase : TreeNode | None = None


def _lowercase ( lowerCamelCase__ : TreeNode | None ):
    # Validation
    def is_valid_tree(lowerCamelCase__ : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(lowerCamelCase__, lowerCamelCase__ ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )

    if not is_valid_tree(lowerCamelCase__ ):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        lowerCamelCase__ : TreeNode | None, lowerCamelCase__ : float, lowerCamelCase__ : float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, lowerCamelCase__, node.data )
            and is_binary_search_tree_recursive_check(
                node.right, node.data, lowerCamelCase__
            )
        )

    return is_binary_search_tree_recursive_check(lowerCamelCase__, -float("inf" ), float("inf" ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
691
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = ProphetNetTokenizer __UpperCAmelCase : Optional[Any] = False def __lowerCAmelCase ( self ) -> Tuple: super().setUp() _a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def __lowerCAmelCase ( self , snake_case_ ) -> Any: _a = "UNwant\u00E9d,running" _a = "unwanted, running" return input_text, output_text def __lowerCAmelCase ( self ) -> Any: _a = self.tokenizer_class(self.vocab_file ) _a = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def __lowerCAmelCase ( self ) -> List[str]: _a = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> List[Any]: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> int: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __lowerCAmelCase ( self ) -> List[str]: _a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] _a = {} for i, token in enumerate(snake_case_ ): _a = i _a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = ["A long paragraph for summarization.", "Another paragraph for summarization."] _a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2] _a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ ) _a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ ) assert encoded_sentence == text + [1_0_2] assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
691
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: _a = tempfile.mkdtemp() _a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是", "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt", ] _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) _a = { "do_resize": True, "size": {"height": 2_2_4, "width": 2_2_4}, "do_center_crop": True, "crop_size": {"height": 1_8, "width": 1_8}, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], "do_convert_rgb": True, } _a = os.path.join(self.tmpdirname , snake_case_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self , **snake_case_ ) -> str: return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self , **snake_case_ ) -> int: return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self , **snake_case_ ) -> str: return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[str]: _a = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _a = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.get_tokenizer() _a = self.get_rust_tokenizer() _a = self.get_image_processor() _a = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) processor_slow.save_pretrained(self.tmpdirname ) _a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ ) _a = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) processor_fast.save_pretrained(self.tmpdirname ) _a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case_ ) self.assertIsInstance(processor_fast.tokenizer , snake_case_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case_ ) self.assertIsInstance(processor_fast.image_processor , snake_case_ ) def __lowerCAmelCase ( self ) -> Dict: _a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a = 
self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" ) _a = self.get_image_processor(do_normalize=snake_case_ ) _a = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=snake_case_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.get_image_processor() _a = self.get_tokenizer() _a = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _a = self.prepare_image_inputs() _a = image_processor(snake_case_ , return_tensors="np" ) _a = processor(images=snake_case_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_image_processor() _a = self.get_tokenizer() _a = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _a = "Alexandra,T-shirt的价格是15便士。" _a = processor(text=snake_case_ ) _a = tokenizer(snake_case_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_image_processor() _a = self.get_tokenizer() _a = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _a = "Alexandra,T-shirt的价格是15便士。" _a = self.prepare_image_inputs() _a = processor(text=snake_case_ , images=snake_case_ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case_ ): processor() def __lowerCAmelCase ( self ) -> Optional[int]: _a = self.get_image_processor() _a = self.get_tokenizer() _a = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a = processor.batch_decode(snake_case_ ) _a = tokenizer.batch_decode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> Any: _a = self.get_image_processor() _a = self.get_tokenizer() _a = ChineseCLIPProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) _a = "Alexandra,T-shirt的价格是15便士。" _a = self.prepare_image_inputs() _a = processor(text=snake_case_ , images=snake_case_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
691
'''simple docstring''' import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _lowercase ( ): _a = argparse.ArgumentParser() parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 ) parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 ) parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 ) parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ ) parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 ) parser.add_argument("--seed", type=lowerCamelCase__, default=0 ) parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" ) parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 ) parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 ) parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" ) return parser.parse_args() __snake_case : str = load("accuracy") def _lowercase ( lowerCamelCase__ : List[str] ): _a , _a = eval_pred _a = np.argmax(lowerCamelCase__, axis=1 ) return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ ) class A ( a ): def __init__( self , snake_case_ ) -> None: super().__init__() _a = trainer def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]: if control.should_evaluate: _a = deepcopy(snake_case_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _lowercase ( ): _a = get_args() set_seed(args.seed ) _a = load_dataset("codeparrot/codecomplex", split="train" ) _a = dataset.train_test_split(test_size=0.2 ) _a = train_test["test"].train_test_split(test_size=0.5 ) _a = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) _a = AutoTokenizer.from_pretrained(args.model_ckpt ) _a = tokenizer.eos_token _a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 ) _a = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _a = False _a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(lowerCamelCase__ : Tuple ): _a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 ) _a = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _a = train_test_validation.map( lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, ) _a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ ) _a = TrainingArguments( output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", 
run_name="complexity-java", report_to="wandb", ) _a = Trainer( model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, ) print("Training..." ) trainer.add_callback(CustomCallback(lowerCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
691
1
'''simple docstring''' import argparse import os import torch from transformers.utils import WEIGHTS_NAME __snake_case : Dict = ["small", "medium", "large"] __snake_case : Optional[int] = "lm_head.decoder.weight" __snake_case : Optional[int] = "lm_head.weight" def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : str ): _a = torch.load(lowerCamelCase__ ) _a = d.pop(lowerCamelCase__ ) os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ ) torch.save(lowerCamelCase__, os.path.join(lowerCamelCase__, lowerCamelCase__ ) ) if __name__ == "__main__": __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument("--dialogpt_path", default=".", type=str) __snake_case : List[str] = parser.parse_args() for MODEL in DIALOGPT_MODELS: __snake_case : Tuple = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''') __snake_case : Tuple = f'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
691
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ): _a = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] _a = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } _a = F'''{src_lang}-{tgt_lang}''' _a = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ ) _a = os.path.join(lowerCamelCase__, "README.md" ) print(F'''Generating {path}''' ) with open(lowerCamelCase__, "w", encoding="utf-8" ) as f: f.write(lowerCamelCase__ ) # make sure we are under the root of the project __snake_case : int = Path(__file__).resolve().parent.parent.parent __snake_case : int = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: __snake_case : Any = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
691
1
'''simple docstring''' from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
691
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __snake_case : List[str] = logging.get_logger(__name__) __snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all MVP models at https://huggingface.co/models?filter=mvp __snake_case : str = { "vocab_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json", }, "added_tokens.json": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json", }, "merges_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt", }, "tokenizer_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json", }, } __snake_case : Dict = { "RUCAIBox/mvp": 1024, } class A ( a ): __UpperCAmelCase : int = VOCAB_FILES_NAMES __UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""] __UpperCAmelCase : List[Any] = MvpTokenizer def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]: super().__init__( snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , ) _a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space: _a = getattr(snake_case_ , pre_tok_state.pop("type" ) ) _a = add_prefix_space _a = pre_tok_class(**snake_case_ ) _a = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _a = "post_processor" _a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ ) if tokenizer_component_instance: _a = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _a = tuple(state["sep"] ) if "cls" in state: _a = tuple(state["cls"] ) _a = False if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space: _a = add_prefix_space _a = True if state.get("trim_offsets" , snake_case_ ) != trim_offsets: _a = trim_offsets _a = True if changes_to_apply: _a = getattr(snake_case_ , state.pop("type" ) ) _a = component_class(**snake_case_ ) setattr(self.backend_tokenizer , snake_case_ , snake_case_ ) @property def __lowerCAmelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]: _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value _a = value def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding: _a = kwargs.get("is_split_into_words" , snake_case_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding: _a = kwargs.get("is_split_into_words" , snake_case_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]: _a = self._tokenizer.model.save(snake_case_ , name=snake_case_ ) return tuple(snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]: _a = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
691
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=a ): __UpperCAmelCase : int = ["""torch""", """scipy"""] def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple: requires_backends(self , ["torch", "scipy"] ) @classmethod def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]: requires_backends(cls , ["torch", "scipy"] ) @classmethod def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any: requires_backends(cls , ["torch", "scipy"] )
691
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __snake_case : Optional[Any] = 12_8022 __snake_case : List[str] = 12_8028 @require_sentencepiece class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = MaMaaaTokenizer __UpperCAmelCase : int = False __UpperCAmelCase : str = False __UpperCAmelCase : Tuple = True def __lowerCAmelCase ( self ) -> Any: super().setUp() _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] _a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) _a = Path(self.tmpdirname ) save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) _a = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **snake_case_ ) -> str: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Tuple: return ( "This is a test", "This is a test", ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "</s>" _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_tokenizer() _a = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def __lowerCAmelCase ( self ) -> Any: pass def __lowerCAmelCase ( self ) -> Dict: _a = self.get_tokenizer() _a = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , ) _a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) _a = tokenizer.convert_tokens_to_string(snake_case_ ) self.assertEqual(snake_case_ , "This is a test" ) @slow def __lowerCAmelCase ( self ) -> List[Any]: # fmt: off _a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): __UpperCAmelCase : Any = """facebook/m2m100_418M""" __UpperCAmelCase : Dict = [ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] __UpperCAmelCase : Optional[Any] = [ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off __UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def __lowerCAmelCase ( cls ) -> int: _a = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) _a = 1 return cls def __lowerCAmelCase ( self ) -> Any: self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.tokenizer.get_vocab() self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[str]: _a = "en" _a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: self.assertIn(snake_case_ , self.tokenizer.all_special_ids ) # fmt: off _a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on _a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) _a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertNotIn(self.tokenizer.eos_token , snake_case_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = tempfile.mkdtemp() _a = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(snake_case_ ) _a = MaMaaaTokenizer.from_pretrained(snake_case_ ) self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ ) @require_torch def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "en" _a = "fr" _a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" ) _a = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: _a = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) _a = "zh" 
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __lowerCAmelCase ( self ) -> List[Any]: _a = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _a = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __lowerCAmelCase ( self ) -> int: _a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(snake_case_ ) , { # en_XX, A, test, EOS "input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 1_2_8_0_0_6, } , )
691
1
'''simple docstring''' import enum import shutil import sys __snake_case , __snake_case : List[Any] = shutil.get_terminal_size() __snake_case : int = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} class A ( enum.Enum ): __UpperCAmelCase : str = 0 __UpperCAmelCase : Dict = 1 def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int]="" ): sys.stdout.write(str(lowerCamelCase__ ) + end ) sys.stdout.flush() def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : str, lowerCamelCase__ : Dict="" ): forceWrite(F'''\u001b[{color}m{content}\u001b[0m''', lowerCamelCase__ ) def _lowercase ( ): forceWrite("\r" ) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : str ): forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' ) def _lowercase ( ): forceWrite(" " * TERMINAL_WIDTH ) reset_cursor() def _lowercase ( ): reset_cursor() forceWrite("-" * TERMINAL_WIDTH )
691
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case : Tuple = logging.get_logger(__name__) __snake_case : int = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class A ( a ): __UpperCAmelCase : Union[str, Any] = """wav2vec2""" def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]: super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # adapter _a = add_adapter _a = adapter_kernel_size _a = adapter_stride _a = num_adapter_layers _a = output_hidden_size or hidden_size _a = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _a = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = xvector_output_dim @property def __lowerCAmelCase ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
691
1
'''simple docstring''' import collections import importlib.util import os import re from pathlib import Path __snake_case : List[str] = "src/transformers" # Matches is_xxx_available() __snake_case : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} __snake_case : Union[str, Any] = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __snake_case : Optional[int] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available __snake_case : Dict = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") __snake_case : Any = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __snake_case : Optional[Any] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", __snake_case : Union[str, Any] = re.compile("^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], __snake_case : int = re.compile("^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo __snake_case : Optional[int] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: __snake_case : Union[str, Any] = re.compile(R"^\s*try:") # Catches a line with else: __snake_case : List[str] = re.compile(R"^\s*else:") def _lowercase ( lowerCamelCase__ : Any ): if _re_test_backend.search(lowerCamelCase__ ) is None: return None _a = [b[0] for b in _re_backend.findall(lowerCamelCase__ )] backends.sort() return "_and_".join(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Dict ): with open(lowerCamelCase__, "r", encoding="utf-8", newline="\n" ) as f: _a = f.readlines() _a = 0 while line_index < len(lowerCamelCase__ ) and not lines[line_index].startswith("_import_structure = {" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase__ ): return None # First grab the objects without a specific backend in _import_structure _a = [] while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None: _a = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase__ ): _a = _re_one_line_import_struct.search(lowerCamelCase__ ).groups()[0] _a = re.findall("\[([^\]]+)\]", lowerCamelCase__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", " )] ) line_index += 1 continue _a = _re_import_struct_key_value.search(lowerCamelCase__ ) if single_line_import_search is not None: _a = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(lowerCamelCase__ ) > 0] objects.extend(lowerCamelCase__ ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) line_index += 1 _a = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING" ): # If the line is an if not is_backend_available, we grab all objects associated. 
_a = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _a = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _a = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ): _a = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase__ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase__ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase__ ) is not None: _a = _re_import_struct_add_many.search(lowerCamelCase__ ).groups()[0].split(", " ) _a = [obj[1:-1] for obj in imports if len(lowerCamelCase__ ) > 0] objects.extend(lowerCamelCase__ ) elif _re_between_brackets.search(lowerCamelCase__ ) is not None: _a = _re_between_brackets.search(lowerCamelCase__ ).groups()[0].split(", " ) _a = [obj[1:-1] for obj in imports if len(lowerCamelCase__ ) > 0] objects.extend(lowerCamelCase__ ) elif _re_quote_object.search(lowerCamelCase__ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase__ ).groups()[0] ) elif line.startswith(" " * 8 + "\"" ): objects.append(line[9:-3] ) elif line.startswith(" " * 12 + "\"" ): objects.append(line[13:-3] ) line_index += 1 _a = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _a = [] while ( line_index < len(lowerCamelCase__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("else" ) ): _a = lines[line_index] _a = _re_import.search(lowerCamelCase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 _a = {"none": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase__ ): # If the line is an if is_backend_available, we grab all objects associated. 
_a = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _a = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _a = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ): _a = lines[line_index] _a = _re_import.search(lowerCamelCase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 12 ): objects.append(line[12:-2] ) line_index += 1 _a = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Tuple ): def find_duplicates(lowerCamelCase__ : Any ): return [k for k, v in collections.Counter(lowerCamelCase__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _a = [] for key in import_dict_objects.keys(): _a = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _a = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _a = "base imports" if key == "none" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _lowercase ( ): _a = [] for root, _, files in os.walk(lowerCamelCase__ ): if "__init__.py" in files: _a = os.path.join(lowerCamelCase__, "__init__.py" ) _a = parse_init(lowerCamelCase__ ) if objects is not None: _a = analyze_results(*lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: _a = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("\n".join(lowerCamelCase__ ) ) if len(lowerCamelCase__ ) > 0: raise ValueError("\n\n".join(lowerCamelCase__ ) ) def _lowercase ( ): _a = [] for path, directories, files in os.walk(lowerCamelCase__ ): for folder in directories: # Ignore private modules if folder.startswith("_" ): directories.remove(lowerCamelCase__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase__ ) / folder).glob("*.py" ) ) ) == 0: continue _a = str((Path(lowerCamelCase__ ) / folder).relative_to(lowerCamelCase__ ) ) _a = short_path.replace(os.path.sep, "." ) submodules.append(lowerCamelCase__ ) for fname in files: if fname == "__init__.py": continue _a = str((Path(lowerCamelCase__ ) / fname).relative_to(lowerCamelCase__ ) ) _a = short_path.replace(".py", "" ).replace(os.path.sep, "." ) if len(submodule.split("." ) ) == 1: submodules.append(lowerCamelCase__ ) return submodules __snake_case : str = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", ] def _lowercase ( ): # This is to make sure the transformers module imported is the one in the repo. 
_a = importlib.util.spec_from_file_location( "transformers", os.path.join(lowerCamelCase__, "__init__.py" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) _a = spec.loader.load_module() _a = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(lowerCamelCase__ ) > 0: _a = "\n".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( "The following submodules are not properly registered in the main init of Transformers:\n" F'''{list_of_modules}\n''' "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." ) if __name__ == "__main__": check_all_inits() check_submodules()
691
'''simple docstring''' def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return number | (1 << position) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return number & ~(1 << position) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return number ^ (1 << position) def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return ((number >> position) & 1) == 1 def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
691
1
'''simple docstring''' def _lowercase ( lowerCamelCase__ : list[list] ): _a = current_set.copy() for row_index, row in enumerate(lowerCamelCase__ ): _a = row[0] for column_index, column in enumerate(lowerCamelCase__ ): if magnitude == 0: _a = column continue _a = column / magnitude # Subtract to cancel term _a = current_set[0] _a = [first_row] _a = current_set[1::] for row in current_set: _a = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowerCamelCase__ ) continue for column_index in range(len(lowerCamelCase__ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowerCamelCase__ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _a = final_set[0] _a = [] _a = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _a = simplify(lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, lowerCamelCase__ ) _a = resultant return final_set def _lowercase ( lowerCamelCase__ : list[list] ): if len(lowerCamelCase__ ) == 0: raise IndexError("solve_simultaneous() requires n lists of length n+1" ) _a = len(lowerCamelCase__ ) + 1 if any(len(lowerCamelCase__ ) != _length for item in equations ): raise IndexError("solve_simultaneous() requires n lists of length n+1" ) for row in equations: if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ): raise ValueError("solve_simultaneous() requires lists of integers" ) if len(lowerCamelCase__ ) == 1: return [equations[0][-1] / equations[0][0]] _a = equations.copy() if any(0 in row for row in data_set ): _a = data_set.copy() _a = [] for row_index, row in enumerate(lowerCamelCase__ ): if 0 not in row: _a = data_set.pop(lowerCamelCase__ ) break if not full_row: raise ValueError("solve_simultaneous() requires at least 1 full equation" ) data_set.insert(0, lowerCamelCase__ ) _a = data_set.copy() _a = simplify(lowerCamelCase__ ) _a = simplified[::-1] _a = [] for row in simplified: _a = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _a = row.copy()[: len(lowerCamelCase__ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowerCamelCase__ ) == 0: solutions.append(0 ) continue _a = temp_row[1::] _a = temp_row[::-1] for column_index, column in enumerate(lowerCamelCase__ ): current_solution -= column * solutions[column_index] solutions.append(lowerCamelCase__ ) _a = [] for item in solutions: final.append(float(round(lowerCamelCase__, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() __snake_case : Tuple = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
691
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu __snake_case : List[Any] = [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ): _a = True while ask_again: _a = input(lowerCamelCase__ ) try: if default is not None and len(lowerCamelCase__ ) == 0: return default return convert_value(lowerCamelCase__ ) if convert_value is not None else result except Exception: if error_message is not None: print(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ): _a = BulletMenu(lowerCamelCase__, lowerCamelCase__ ) _a = menu.run(default_choice=lowerCamelCase__ ) return convert_value(lowerCamelCase__ ) if convert_value is not None else result def _lowercase ( lowerCamelCase__ : str ): _a = int(lowerCamelCase__ ) return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] ) def _lowercase ( lowerCamelCase__ : str ): _a = int(lowerCamelCase__ ) return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] ) def _lowercase ( lowerCamelCase__ : Dict ): _a = int(lowerCamelCase__ ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def _lowercase ( lowerCamelCase__ : List[Any] ): _a = int(lowerCamelCase__ ) return PrecisionType(["no", "fp16", "bf16", "fp8"][value] ) def _lowercase ( lowerCamelCase__ : str ): _a = int(lowerCamelCase__ ) return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] ) def _lowercase ( lowerCamelCase__ : str ): return {"yes": True, "no": False}[value.lower()] class A ( argparse.RawDescriptionHelpFormatter ): def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int: _a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _a = usage.replace("<command> [<args>] " , "" ) return usage
691
1
'''simple docstring''' def _lowercase ( lowerCamelCase__ : list ): if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] _a = [] def generate(lowerCamelCase__ : int, lowerCamelCase__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1, lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even _a , _a = arr[k - 1], arr[i] else: # k is odd _a , _a = arr[k - 1], arr[0] generate(k - 1, lowerCamelCase__ ) generate(len(lowerCamelCase__ ), lowerCamelCase__ ) return res if __name__ == "__main__": __snake_case : Tuple = input("Enter numbers separated by a comma:\n").strip() __snake_case : List[Any] = [int(item) for item in user_input.split(",")] print(heaps(arr))
691
'''simple docstring''' def _lowercase ( lowerCamelCase__ : list[list] ): _a = current_set.copy() for row_index, row in enumerate(lowerCamelCase__ ): _a = row[0] for column_index, column in enumerate(lowerCamelCase__ ): if magnitude == 0: _a = column continue _a = column / magnitude # Subtract to cancel term _a = current_set[0] _a = [first_row] _a = current_set[1::] for row in current_set: _a = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowerCamelCase__ ) continue for column_index in range(len(lowerCamelCase__ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowerCamelCase__ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _a = final_set[0] _a = [] _a = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _a = simplify(lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, lowerCamelCase__ ) _a = resultant return final_set def _lowercase ( lowerCamelCase__ : list[list] ): if len(lowerCamelCase__ ) == 0: raise IndexError("solve_simultaneous() requires n lists of length n+1" ) _a = len(lowerCamelCase__ ) + 1 if any(len(lowerCamelCase__ ) != _length for item in equations ): raise IndexError("solve_simultaneous() requires n lists of length n+1" ) for row in equations: if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ): raise ValueError("solve_simultaneous() requires lists of integers" ) if len(lowerCamelCase__ ) == 1: return [equations[0][-1] / equations[0][0]] _a = equations.copy() if any(0 in row for row in data_set ): _a = data_set.copy() _a = [] for row_index, row in enumerate(lowerCamelCase__ ): if 0 not in row: _a = data_set.pop(lowerCamelCase__ ) break if not full_row: raise ValueError("solve_simultaneous() requires at least 1 full equation" ) data_set.insert(0, lowerCamelCase__ ) _a = data_set.copy() _a = simplify(lowerCamelCase__ ) _a = simplified[::-1] _a = [] for row in simplified: _a = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _a = row.copy()[: len(lowerCamelCase__ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowerCamelCase__ ) == 0: solutions.append(0 ) continue _a = temp_row[1::] _a = temp_row[::-1] for column_index, column in enumerate(lowerCamelCase__ ): current_solution -= column * solutions[column_index] solutions.append(lowerCamelCase__ ) _a = [] for item in solutions: final.append(float(round(lowerCamelCase__, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() __snake_case : Tuple = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
691
1
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets __snake_case : Optional[int] = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n" __snake_case : Optional[int] = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n" __snake_case : Optional[Any] = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n" @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> int: _a = 0.0 for i, j in zip(snake_case_ , snake_case_ ): n_correct += 1.0 if math_equivalence.is_equiv(snake_case_ , snake_case_ ) else 0.0 _a = n_correct / len(snake_case_ ) return { "accuracy": accuracy, }
691
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing return x.sum() def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing return i + 1 @dataclass class A : __UpperCAmelCase : int __UpperCAmelCase : str class A ( a ): def __lowerCAmelCase ( self ) -> Tuple: _a = {} _a = [] _a = 1 _a = [1, 2] _a = {"a": 1, "b": 2} _a = {"a": [1, 2], "b": [3, 4]} _a = {"a": {"1": 1}, "b": 2} _a = {"a": 1, "b": 2, "c": 3, "d": 4} _a = {} _a = [] _a = 2 _a = [2, 3] _a = {"a": 2, "b": 3} _a = {"a": [2, 3], "b": [4, 5]} _a = {"a": {"1": 2}, "b": 3} _a = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) _a = 2 self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) _a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} _a = {"a": 2, "b": 0, "c": 2} _a = { "a": np.eye(2 ).astype(snake_case_ ), "b": np.zeros(3 ).astype(snake_case_ ), "c": np.ones(2 ).astype(snake_case_ ), } self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(snake_case_ ): # can't pickle a local lambda map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ ) def __lowerCAmelCase ( self ) -> Any: _a = {"a": 1, "b": 2} _a = {"a": 3, "b": 4} _a = {"a": 5, "b": 6} _a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(snake_case_ , 
snake_case_ , snake_case_ ) ) , snake_case_ ) def __lowerCAmelCase ( self ) -> str: class A : __UpperCAmelCase : Optional[int] = """bar""" _a = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(snake_case_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc", [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ): with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: _a = {F'''{i}''': i for i in range(lowerCamelCase__ )} _a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A ( a ): @require_tf def __lowerCAmelCase ( self ) -> Any: import tensorflow as tf from tensorflow.keras import layers _a = layers.Dense(2 ) def gen_random_output(): _a = tf.random.uniform((1, 3) ) return model(snake_case_ ).numpy() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: import torch def gen_random_output(): _a = torch.nn.Linear(3 , 2 ) _a = torch.rand(1 , 3 ) return model(snake_case_ ).detach().numpy() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __lowerCAmelCase ( self ) -> Optional[int]: def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(4_2 ): _a = gen_random_output() with temp_seed(4_2 ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data", [{}] ) def _lowercase ( lowerCamelCase__ : Any ): _a = NestedDataStructure(lowerCamelCase__ ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output", [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ], ) def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ): _a = 
NestedDataStructure(lowerCamelCase__ ).flatten() assert output == expected_output def _lowercase ( ): _a = A(x=1, y="foobar" ) _a = {"x": 1, "y": "foobar"} assert asdict(lowerCamelCase__ ) == expected_output _a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]} _a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(lowerCamelCase__ ) == expected_output with pytest.raises(lowerCamelCase__ ): asdict([1, A(x=10, y="foo" )] ) def _lowercase ( lowerCamelCase__ : str ): return text.split() def _lowercase ( lowerCamelCase__ : List[Any] ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _lowercase ( ): with Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _a = [] for yield_time, content in iflatmap_unordered( lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(lowerCamelCase__ ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(lowerCamelCase__ ) == 4
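A minimal sketch (not part of the test file) of the map_nested behaviour these tests assert, using the same datasets.utils.py_utils import; the toy inputs are mine.

from datasets.utils.py_utils import map_nested

def add_one(x):
    return x + 1

# map_nested applies the function to every leaf of a nested dict/list structure
print(map_nested(add_one, {"a": [1, 2], "b": 3}))   # {'a': [2, 3], 'b': 4}
print(map_nested(add_one, [[1, 2], [3, 4]]))        # [[2, 3], [4, 5]]
# passing num_proc=2 (with a picklable, module-level function such as add_one) takes the multiprocessing path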
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=3 , snake_case_=None , snake_case_=2 , ) -> Dict: _a = parent _a = batch_size _a = image_size _a = patch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range _a = scope _a = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _a = (image_size // patch_size) ** 2 _a = num_patches + 2 def __lowerCAmelCase ( self ) -> List[Any]: _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Optional[int]: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]: _a = DeiTModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _a = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]: _a = DeiTForMaskedImageModeling(config=snake_case_ ) model.to(snake_case_ ) model.eval() _a = model(snake_case_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _a = 1 _a = DeiTForMaskedImageModeling(snake_case_ ) model.to(snake_case_ ) model.eval() _a = 
floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(snake_case_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Any: _a = self.type_sequence_label_size _a = DeiTForImageClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _a = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _a = 1 _a = DeiTForImageClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> Tuple: _a = self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ) = config_and_inputs _a = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A ( a , a , unittest.TestCase ): __UpperCAmelCase : List[str] = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __UpperCAmelCase : Tuple = ( { """feature-extraction""": DeiTModel, """image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __UpperCAmelCase : Any = False __UpperCAmelCase : int = False __UpperCAmelCase : List[Any] = False def __lowerCAmelCase ( self ) -> int: _a = DeiTModelTester(self ) _a = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 ) def __lowerCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def __lowerCAmelCase ( self ) -> Optional[Any]: pass def __lowerCAmelCase ( self ) -> Any: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(snake_case_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) ) def __lowerCAmelCase ( self ) -> int: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(snake_case_ ) _a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def __lowerCAmelCase ( self ) -> int: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case_ ) def __lowerCAmelCase ( self ) -> Dict: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=False ) -> Dict: _a = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowerCAmelCase ( self ) -> Union[str, 
Any]: if not self.model_tester.is_training: return _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(snake_case_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue _a = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _a = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) _a = model(**snake_case_ ).loss loss.backward() def __lowerCAmelCase ( self ) -> List[Any]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _a = False _a = True for model_class in self.all_model_classes: if model_class in get_values(snake_case_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue _a = model_class(snake_case_ ) model.gradient_checkpointing_enable() model.to(snake_case_ ) model.train() _a = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) _a = model(**snake_case_ ).loss loss.backward() def __lowerCAmelCase ( self ) -> Optional[int]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(snake_case_ ), *get_values(snake_case_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ): _a = problem_type["title"] _a = problem_type["num_labels"] _a = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _a = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if problem_type["num_labels"] > 1: _a = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) _a = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=snake_case_ ) as warning_list: _a = model(**snake_case_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def __lowerCAmelCase ( self ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = DeiTModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def _lowercase ( ): _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> Union[str, Any]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Dict: _a = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( snake_case_ ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ ) # forward pass with torch.no_grad(): _a = model(**snake_case_ ) # verify the logits _a = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case_ ) _a = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __lowerCAmelCase ( self ) -> Any: _a = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=snake_case_ , return_tensors="pt" ) _a = inputs.pixel_values.to(snake_case_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _a = model(snake_case_ )
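For reference, the integration test above boils down to the following standalone inference sketch; it reuses the same checkpoint and COCO fixture, though any RGB image would do.

import torch
from PIL import Image
from transformers import DeiTImageProcessor, DeiTForImageClassificationWithTeacher

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")  # same image as prepare_img()
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])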
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(a ) class A ( a ): __UpperCAmelCase : Dict = """rag""" __UpperCAmelCase : Dict = True def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]: super().__init__( bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _a = kwargs.pop("question_encoder" ) _a = question_encoder_config.pop("model_type" ) _a = kwargs.pop("generator" ) _a = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = reduce_loss _a = label_smoothing _a = exclude_bos_score _a = do_marginalize _a = title_sep _a = doc_sep _a = n_docs _a = max_combined_length _a = dataset _a = dataset_split _a = index_name _a = retrieval_vector_size _a = retrieval_batch_size _a = passages_path _a = index_path _a = use_dummy_dataset _a = output_retrieved _a = do_deduplication _a = use_cache if self.forced_eos_token_id is None: _a = getattr(self.generator , "forced_eos_token_id" , snake_case_ ) @classmethod def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = copy.deepcopy(self.__dict__ ) _a = self.question_encoder.to_dict() _a = self.generator.to_dict() _a = self.__class__.model_type return output
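A hedged usage sketch for this composite config; it assumes the final classmethod keeps its usual transformers name, from_question_encoder_generator_configs, and uses DPR/BART sub-configs purely as illustrative choices.

from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig.from_question_encoder_generator_configs(
    DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
)
print(rag_config.n_docs)                # 5
print(rag_config.generator.model_type)  # "bart"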
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand __snake_case : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def _lowercase ( lowerCamelCase__ : str ): if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(lowerCamelCase__ ): return ext raise Exception( F'''Unable to determine file format from file extension {path}. ''' F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' ) def _lowercase ( lowerCamelCase__ : int ): _a = pipeline( task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, ) _a = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format _a = PipelineDataFormat.from_str( format=lowerCamelCase__, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite, ) return RunCommand(lowerCamelCase__, lowerCamelCase__ ) class A ( a ): def __init__( self , snake_case_ , snake_case_ ) -> str: _a = nlp _a = reader @staticmethod def __lowerCAmelCase ( snake_case_ ) -> int: _a = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=snake_case_ , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=snake_case_ , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=snake_case_ , help="Name or path to the model to instantiate." ) run_parser.add_argument("--config" , type=snake_case_ , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=snake_case_ , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=snake_case_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=snake_case_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=snake_case_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a , _a = self._nlp, [] for entry in self._reader: _a = nlp(**snake_case_ ) if self._reader.is_multi_columns else nlp(snake_case_ ) if isinstance(snake_case_ , snake_case_ ): outputs.append(snake_case_ ) else: outputs += output # Saving data if self._nlp.binary_output: _a = self._reader.save_binary(snake_case_ ) logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' ) else: self._reader.save(snake_case_ )
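The command implemented above is a thin wrapper around the pipeline factory; a minimal sketch of the equivalent direct call (task name and input text are illustrative).

from transformers import pipeline

nlp = pipeline(task="sentiment-analysis")  # same factory that run_command_factory calls under the hood
print(nlp("This library is great"))        # e.g. [{'label': 'POSITIVE', 'score': 0.99...}]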
'''simple docstring''' class A : def __init__( self ) -> List[str]: _a = 0 _a = 0 _a = {} def __lowerCAmelCase ( self , snake_case_ ) -> int: if vertex not in self.adjacency: _a = {} self.num_vertices += 1 def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]: self.add_vertex(snake_case_ ) self.add_vertex(snake_case_ ) if head == tail: return _a = weight _a = weight def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.get_edges() for edge in edges: _a , _a , _a = edge edges.remove((tail, head, weight) ) for i in range(len(snake_case_ ) ): _a = list(edges[i] ) edges.sort(key=lambda snake_case_ : e[2] ) for i in range(len(snake_case_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: _a = edges[i][2] + 1 for edge in edges: _a , _a , _a = edge _a = weight _a = weight def __str__( self ) -> Optional[int]: _a = "" for tail in self.adjacency: for head in self.adjacency[tail]: _a = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __lowerCAmelCase ( self ) -> Any: return self.adjacency.keys() @staticmethod def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any: _a = Graph() if vertices is None: _a = [] if edges is None: _a = [] for vertex in vertices: g.add_vertex(snake_case_ ) for edge in edges: g.add_edge(*snake_case_ ) return g class A : def __init__( self ) -> Optional[int]: _a = {} _a = {} def __len__( self ) -> List[Any]: return len(self.parent ) def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: if item in self.parent: return self.find(snake_case_ ) _a = item _a = 0 return item def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]: if item not in self.parent: return self.make_set(snake_case_ ) if item != self.parent[item]: _a = self.find(self.parent[item] ) return self.parent[item] def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]: _a = self.find(snake_case_ ) _a = self.find(snake_case_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _a = roota return roota if self.rank[roota] < self.rank[roota]: _a = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _a = roota return roota return None @staticmethod def __lowerCAmelCase ( snake_case_ ) -> Tuple: _a = graph.num_vertices _a = Graph.UnionFind() _a = [] while num_components > 1: _a = {} for vertex in graph.get_vertices(): _a = -1 _a = graph.get_edges() for edge in edges: _a , _a , _a = edge edges.remove((tail, head, weight) ) for edge in edges: _a , _a , _a = edge _a = union_find.find(snake_case_ ) _a = union_find.find(snake_case_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _a = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _a = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _a , _a , _a = cheap_edge[vertex] if union_find.find(snake_case_ ) != union_find.find(snake_case_ ): union_find.union(snake_case_ , snake_case_ ) mst_edges.append(cheap_edge[vertex] ) _a = num_components - 1 _a = Graph.build(edges=snake_case_ ) return mst
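The collapsed method names above make the class hard to call directly, so here is a self-contained sketch of the same union-find minimum-spanning-tree idea, written Kruskal-style for brevity rather than the Boruvka-style component loop used above; all names are mine.

def kruskal_mst(num_vertices, edges):
    """edges: iterable of (u, v, weight) tuples; returns the MST edge list."""
    parent = list(range(num_vertices))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge joins two different components, so it is safe to take
            parent[root_u] = root_v
            mst.append((u, v, w))
    return mst

print(kruskal_mst(4, [(0, 1, 1), (1, 2, 2), (0, 2, 3), (2, 3, 1)]))
# [(0, 1, 1), (2, 3, 1), (1, 2, 2)]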
"""Dinat model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
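A short sketch of how the derived attributes follow from the constructor arguments above (default values shown):

from transformers import DinatConfig

config = DinatConfig()       # embed_dim=64, depths=[3, 4, 6, 5] by default
print(len(config.depths))    # 4 stages
print(config.hidden_size)    # 64 * 2 ** (4 - 1) = 512, the channel width after the last stage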
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __snake_case : Tuple = "\\n Text data.\n Second line of data." __snake_case : int = "file" @pytest.fixture(scope="session" ) def _lowercase ( lowerCamelCase__ : Optional[Any] ): _a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") _a = bytes(lowerCamelCase__, "utf-8" ) with zstd.open(lowerCamelCase__, "wb" ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture def _lowercase ( lowerCamelCase__ : int ): with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f: f.write(lowerCamelCase__ ) return FILE_PATH @pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] ) def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ): _a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} _a = input_paths[compression_format] _a = tmp_path / "cache" _a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ ) _a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ ) with open(lowerCamelCase__ ) as f: _a = f.read() with open(lowerCamelCase__ ) as f: _a = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted", [True, False] ) @pytest.mark.parametrize("default_cache_dir", [True, False] ) def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ): _a = "custom_cache" _a = "custom_extracted_dir" _a = tmp_path / "custom_extracted_path" if default_extracted: _a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) ) _a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _a = xz_file _a = ( DownloadConfig(extract_compressed_file=lowerCamelCase__ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ ) ) _a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ ) assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected def _lowercase ( lowerCamelCase__ : Union[str, Any] ): # absolute path _a = str(Path(lowerCamelCase__ ).resolve() ) assert cached_path(lowerCamelCase__ ) == text_file # relative path _a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(lowerCamelCase__ ) == text_file def _lowercase ( lowerCamelCase__ : Dict ): # absolute path _a = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(lowerCamelCase__ ): cached_path(lowerCamelCase__ ) # relative path _a = "./__missing_file__.txt" with pytest.raises(lowerCamelCase__ ): cached_path(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = get_from_cache(F'''tmp://{tmpfs_file}''' ) with open(lowerCamelCase__ ) as f: _a = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE", 
lowerCamelCase__ ) def _lowercase ( ): with pytest.raises(lowerCamelCase__ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): http_get("https://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Optional[Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): fsspec_head("s3://huggingface.co" )
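A brief sketch of the cached_path behaviour these tests pin down; the file names are hypothetical.

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

# a plain local file resolves to itself
print(cached_path("some_local_file.txt"))

# a local archive is decompressed into the cache when extraction is requested
config = DownloadConfig(extract_compressed_file=True)
print(cached_path("some_local_file.txt.gz", download_config=config))  # path of the extracted text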
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class A ( unittest.TestCase ): __UpperCAmelCase : Optional[Any] = StableDiffusionLDMaDPipeline __UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase : Any = TEXT_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) _a = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) _a = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _a = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) _a = CLIPTextModel(snake_case_ ) _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _a = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Union[str, Any]: if str(snake_case_ ).startswith("mps" ): _a = torch.manual_seed(snake_case_ ) else: _a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _a = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.get_dummy_components() _a = StableDiffusionLDMaDPipeline(**snake_case_ ) _a = ldmad_pipe.to(snake_case_ ) ldmad_pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs(snake_case_ ) _a = ldmad_pipe(**snake_case_ ) _a , _a = output.rgb, output.depth _a = rgb[0, -3:, -3:, -1] _a = depth[0, -3:, -1] assert rgb.shape == (1, 6_4, 6_4, 3) assert depth.shape == (1, 6_4, 6_4) _a = np.array( [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] ) _a = np.array([103.46_727, 85.812_004, 87.849_236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_dummy_components() _a = StableDiffusionLDMaDPipeline(**snake_case_ ) _a = ldmad_pipe.to(snake_case_ ) 
ldmad_pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs(snake_case_ ) _a = 3 * [inputs["prompt"]] # forward _a = ldmad_pipe(**snake_case_ ) _a , _a = output.rgb, output.depth _a = rgb_slice_a[0, -3:, -3:, -1] _a = depth_slice_a[0, -3:, -1] _a = self.get_dummy_inputs(snake_case_ ) _a = 3 * [inputs.pop("prompt" )] _a = ldmad_pipe.tokenizer( snake_case_ , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , ) _a = text_inputs["input_ids"].to(snake_case_ ) _a = ldmad_pipe.text_encoder(snake_case_ )[0] _a = prompt_embeds # forward _a = ldmad_pipe(**snake_case_ ) _a , _a = output.rgb, output.depth _a = rgb_slice_a[0, -3:, -3:, -1] _a = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4 def __lowerCAmelCase ( self ) -> int: _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.get_dummy_components() _a = PNDMScheduler(skip_prk_steps=snake_case_ ) _a = StableDiffusionLDMaDPipeline(**snake_case_ ) _a = ldmad_pipe.to(snake_case_ ) ldmad_pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs(snake_case_ ) _a = "french fries" _a = ldmad_pipe(**snake_case_ , negative_prompt=snake_case_ ) _a , _a = output.rgb, output.depth _a = rgb[0, -3:, -3:, -1] _a = depth[0, -3:, -1] assert rgb.shape == (1, 6_4, 6_4, 3) assert depth.shape == (1, 6_4, 6_4) _a = np.array( [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] ) _a = np.array([107.84_738, 84.62_802, 89.962_135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2 @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self , snake_case_ , snake_case_="cpu" , snake_case_=torch.floataa , snake_case_=0 ) -> Tuple: _a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _a = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 6_4, 6_4) ) _a = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ) _a = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def __lowerCAmelCase ( self ) -> Tuple: _a = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ) _a = ldmad_pipe.to(snake_case_ ) ldmad_pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_inputs(snake_case_ ) _a = ldmad_pipe(**snake_case_ ) _a , _a = output.rgb, output.depth _a = rgb[0, -3:, -3:, -1].flatten() _a = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 5_1_2, 5_1_2, 3) assert depth.shape == (1, 5_1_2, 5_1_2) _a = np.array( [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] ) _a = np.array( [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3 @nightly @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Union[str, Any]: super().tearDown() gc.collect() 
torch.cuda.empty_cache() def __lowerCAmelCase ( self , snake_case_ , snake_case_="cpu" , snake_case_=torch.floataa , snake_case_=0 ) -> int: _a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _a = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 6_4, 6_4) ) _a = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ) _a = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 5_0, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: _a = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(snake_case_ ) ldmad_pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_inputs(snake_case_ ) _a = ldmad_pipe(**snake_case_ ) _a , _a = output.rgb, output.depth _a = 0.495_586 _a = 0.33_795_515 _a = 112.48_518 _a = 98.489_746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3 def __lowerCAmelCase ( self ) -> List[str]: _a = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(snake_case_ ) ldmad_pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_inputs(snake_case_ ) _a = ldmad_pipe(**snake_case_ ) _a , _a = output.rgb, output.depth _a = 0.4_194_127 _a = 0.35_375_586 _a = 0.5_638_502 _a = 0.34_686_103 assert rgb.shape == (1, 5_1_2, 5_1_2, 3) assert depth.shape == (1, 5_1_2, 5_1_2, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3
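The slow tests above correspond to this end-to-end sketch (a GPU and the Intel/ldm3d checkpoint are assumed, as in the tests):

from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50, output_type="numpy")
rgb, depth = output.rgb, output.depth
print(rgb.shape, depth.shape)  # (1, 512, 512, 3) and (1, 512, 512)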
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __snake_case : Union[str, Any] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def _lowercase ( lowerCamelCase__ : List[Any] ): _a = {} state_dict.pop("pixel_mean", lowerCamelCase__ ) state_dict.pop("pixel_std", lowerCamelCase__ ) _a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _a = key.replace(lowerCamelCase__, lowerCamelCase__ ) if re.match(lowerCamelCase__, lowerCamelCase__ ): _a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) ) if layer_nb == 0: _a = key.replace("layers.0", "proj_in" ) elif layer_nb == 1: _a = key.replace("layers.1", "layers.0" ) elif layer_nb == 2: _a = key.replace("layers.2", "proj_out" ) _a = value _a = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ): _a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: _a = SamConfig() elif "sam_vit_l" in model_name: _a = SamVisionConfig( hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) _a = SamConfig( vision_config=lowerCamelCase__, ) elif "sam_vit_h" in model_name: _a = SamVisionConfig( hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) _a = SamConfig( vision_config=lowerCamelCase__, ) _a = torch.load(lowerCamelCase__, map_location="cpu" ) _a = replace_keys(lowerCamelCase__ ) _a = SamImageProcessor() _a = SamProcessor(image_processor=lowerCamelCase__ ) _a = SamModel(lowerCamelCase__ ) hf_model.load_state_dict(lowerCamelCase__ ) _a = hf_model.to("cuda" ) _a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" _a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" ) _a = [[[400, 650]]] _a = [[1]] _a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 _a = processor( 
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 _a = ((75, 275, 1_725, 850),) _a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. _a = [[[400, 650], [800, 650]]] _a = [[1, 1]] _a = processor( images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": __snake_case : Union[str, Any] = argparse.ArgumentParser() __snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) __snake_case : str = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
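The sanity checks in the script amount to the following usage pattern; facebook/sam-vit-huge stands in for any already-converted checkpoint, and a GPU is assumed as in the script.

import numpy as np
import requests
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")
model = SamModel.from_pretrained("facebook/sam-vit-huge").to("cuda")

url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

inputs = processor(
    images=np.array(image), input_points=[[[400, 650]]], input_labels=[[1]], return_tensors="pt"
).to("cuda")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.iou_scores.squeeze())  # predicted mask-quality scores for the prompted point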
'''simple docstring''' import argparse import os import re __snake_case : str = "src/diffusers" # Pattern that looks at the indentation in a line. __snake_case : Optional[int] = re.compile(R"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. __snake_case : Optional[int] = re.compile(R"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __snake_case : List[Any] = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. __snake_case : Optional[int] = re.compile(R"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __snake_case : Tuple = re.compile(R"\[([^\]]+)\]") def _lowercase ( lowerCamelCase__ : Optional[int] ): _a = _re_indent.search(lowerCamelCase__ ) return "" if search is None else search.groups()[0] def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[int]="", lowerCamelCase__ : Optional[Any]=None, lowerCamelCase__ : Tuple=None ): _a = 0 _a = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(lowerCamelCase__ ): index += 1 _a = ["\n".join(lines[:index] )] else: _a = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). _a = [lines[index]] index += 1 while index < len(lowerCamelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase__ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(lowerCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(lowerCamelCase__ ) ) if index < len(lowerCamelCase__ ) - 1: _a = [lines[index + 1]] index += 1 else: _a = [] else: blocks.append("\n".join(lowerCamelCase__ ) ) _a = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(lowerCamelCase__ ) > 0: blocks.append("\n".join(lowerCamelCase__ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(lowerCamelCase__ ): blocks.append("\n".join(lines[index:] ) ) return blocks def _lowercase ( lowerCamelCase__ : str ): def _inner(lowerCamelCase__ : Optional[Any] ): return key(lowerCamelCase__ ).lower().replace("_", "" ) return _inner def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : str=None ): # If no key is provided, we use a noop. def noop(lowerCamelCase__ : Dict ): return x if key is None: _a = noop # Constants are all uppercase, they go first. _a = [obj for obj in objects if key(lowerCamelCase__ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _a = [obj for obj in objects if key(lowerCamelCase__ )[0].isupper() and not key(lowerCamelCase__ ).isupper()] # Functions begin with a lowercase, they go last. _a = [obj for obj in objects if not key(lowerCamelCase__ )[0].isupper()] _a = ignore_underscore(lowerCamelCase__ ) return sorted(lowerCamelCase__, key=lowerCamelCase__ ) + sorted(lowerCamelCase__, key=lowerCamelCase__ ) + sorted(lowerCamelCase__, key=lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : List[str] ): # This inner function sort imports between [ ]. def _replace(lowerCamelCase__ : Dict ): _a = match.groups()[0] if "," not in imports: return F'''[{imports}]''' _a = [part.strip().replace("\"", "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _a = keys[:-1] return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(lowerCamelCase__ )] ) + "]" _a = import_statement.split("\n" ) if len(lowerCamelCase__ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _a = 2 if lines[1].strip() == "[" else 1 _a = [(i, _re_strip_line.search(lowerCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _a = sort_objects(lowerCamelCase__, key=lambda lowerCamelCase__ : x[1] ) _a = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(lowerCamelCase__ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _a = _re_bracket_content.sub(_replace, lines[1] ) else: _a = [part.strip().replace("\"", "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _a = keys[:-1] _a = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(lowerCamelCase__ )] ) return "\n".join(lowerCamelCase__ ) else: # Finally we have to deal with imports fitting on one line _a = _re_bracket_content.sub(_replace, lowerCamelCase__ ) return import_statement def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : str=True ): with open(lowerCamelCase__, "r" ) as f: _a = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _a = split_code_in_indented_blocks( lowerCamelCase__, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1, len(lowerCamelCase__ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _a = main_blocks[block_idx] _a = block.split("\n" ) # Get to the start of the imports. _a = 0 while line_idx < len(lowerCamelCase__ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _a = len(lowerCamelCase__ ) else: line_idx += 1 if line_idx >= len(lowerCamelCase__ ): continue # Ignore beginning and last line: they don't contain anything. _a = "\n".join(block_lines[line_idx:-1] ) _a = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. _a = split_code_in_indented_blocks(lowerCamelCase__, indent_level=lowerCamelCase__ ) # We have two categories of import key: list or _import_structure[key].append/extend _a = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _a = [(pattern.search(lowerCamelCase__ ).groups()[0] if pattern.search(lowerCamelCase__ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _a = [(i, key) for i, key in enumerate(lowerCamelCase__ ) if key is not None] _a = [x[0] for x in sorted(lowerCamelCase__, key=lambda lowerCamelCase__ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
_a = 0 _a = [] for i in range(len(lowerCamelCase__ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: _a = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(lowerCamelCase__ ) count += 1 # And we put our main block back together with its first and last line. _a = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(lowerCamelCase__ ): if check_only: return True else: print(F'''Overwriting {file}.''' ) with open(lowerCamelCase__, "w" ) as f: f.write("\n".join(lowerCamelCase__ ) ) def _lowercase ( lowerCamelCase__ : Union[str, Any]=True ): _a = [] for root, _, files in os.walk(lowerCamelCase__ ): if "__init__.py" in files: _a = sort_imports(os.path.join(lowerCamelCase__, "__init__.py" ), check_only=lowerCamelCase__ ) if result: _a = [os.path.join(lowerCamelCase__, "__init__.py" )] if len(lowerCamelCase__ ) > 0: raise ValueError(F'''Would overwrite {len(lowerCamelCase__ )} files, run `make style`.''' ) if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") __snake_case : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
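Because every helper in this script has been renamed to _lowercase, here is a standalone restatement of the ordering rule it applies inside each import block (constants, then classes, then functions, each sorted case-insensitively with underscores ignored); the function name is mine.

def sort_objects(objects):
    key = lambda name: name.lower().replace("_", "")
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)

print(sort_objects(["load_tool", "MY_CONSTANT", "MyClass", "my_function"]))
# ['MY_CONSTANT', 'MyClass', 'load_tool', 'my_function']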
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase__ : List[Any] ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _a = [] for i in range(lowerCamelCase__ ): _a = i / num_diffusion_timesteps _a = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) ) return torch.tensor(lowerCamelCase__, dtype=torch.floataa ) class A ( a , a ): __UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers] __UpperCAmelCase : Optional[int] = 2 @register_to_config def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]: if trained_betas is not None: _a = torch.tensor(snake_case_ , dtype=torch.floataa ) elif beta_schedule == "linear": _a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _a = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _a = betas_for_alpha_bar(snake_case_ ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _a = 1.0 - self.betas _a = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(snake_case_ , snake_case_ , snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict: if schedule_timesteps is None: _a = self.timesteps _a = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _a = 1 if len(snake_case_ ) > 1 else 0 else: _a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep _a = self._index_counter[timestep_int] return indices[pos].item() @property def __lowerCAmelCase ( self ) -> Dict: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor: _a = self.index_for_timestep(snake_case_ ) if self.state_in_first_order: _a = self.sigmas[step_index] else: _a = self.sigmas_interpol[step_index] _a = sample / ((sigma**2 + 1) ** 0.5) return sample def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]: _a = num_inference_steps _a = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _a = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _a = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) _a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ ) _a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ ) _a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _a = torch.from_numpy(snake_case_ ).to(device=snake_case_ ) # interpolate sigmas _a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() _a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _a = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(snake_case_ ).startswith("mps" ): # mps does not support float64 _a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa ) else: _a = torch.from_numpy(snake_case_ ).to(snake_case_ ) # interpolate timesteps _a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype ) _a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() _a = torch.cat([timesteps[:1], interleaved_timesteps] ) _a = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _a = defaultdict(snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: # get log sigma _a = sigma.log() # get distribution _a = log_sigma - self.log_sigmas[:, None] # get sigmas range _a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _a = low_idx + 1 _a = self.log_sigmas[low_idx] _a = self.log_sigmas[high_idx] # interpolate sigmas _a = (low - log_sigma) / (low - high) _a = w.clamp(0 , 1 ) # transform interpolation to time range _a = (1 - w) * low_idx + w * high_idx _a = t.view(sigma.shape ) return t @property def __lowerCAmelCase ( self ) -> List[Any]: return self.sample is None def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]: _a = self.index_for_timestep(snake_case_ ) # advance index counter by 1 _a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _a = self.sigmas[step_index] _a = self.sigmas_interpol[step_index + 1] _a = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _a = self.sigmas[step_index - 1] _a = self.sigmas_interpol[step_index] _a = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _a = 0 _a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _a = sigma_hat if self.state_in_first_order else sigma_interpol _a = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _a = sigma_hat if self.state_in_first_order else sigma_interpol _a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("prediction_type not implemented yet: sample" ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order _a = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _a = sigma_interpol - sigma_hat # store for 2nd order step _a = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _a = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _a = sigma_next - sigma_hat _a = self.sample _a = None _a = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples _a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ): # mps does not support float64 _a = self.timesteps.to(original_samples.device , dtype=torch.floataa ) _a = timesteps.to(original_samples.device , dtype=torch.floataa ) else: _a = self.timesteps.to(original_samples.device ) _a = timesteps.to(original_samples.device ) _a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps] _a = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _a = sigma.unsqueeze(-1 ) _a = original_samples + noise * sigma return noisy_samples def __len__( self ) -> str: return self.config.num_train_timesteps
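The scheduler class above appears to mirror the two-order (KDPM2-style) discrete scheduler shipped with diffusers. Below is a minimal denoising-loop sketch under that assumption; the public class name KDPM2DiscreteScheduler, the toy UNet2DModel and the sizes are illustrative assumptions, not taken from the file above.

import torch
from diffusers import KDPM2DiscreteScheduler, UNet2DModel

# Hypothetical usage sketch: scheduler defaults, model config and shapes are placeholders.
scheduler = KDPM2DiscreteScheduler()
model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)

scheduler.set_timesteps(25)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    # scale_model_input divides by sqrt(sigma**2 + 1), as in the method above
    model_input = scheduler.scale_model_input(sample, t)
    with torch.no_grad():
        noise_pred = model(model_input, t).sample
    # each step alternates between a first-order and a second-order (DPM-Solver-2) update
    sample = scheduler.step(noise_pred, t, sample).prev_sample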
691
1
'''simple docstring'''


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number | (1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number & ~(1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number ^ (1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return ((number >> position) & 1) == 1


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return int((number & (1 << position)) != 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
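A hypothetical usage sketch of the five helpers above. The descriptive names set_bit, clear_bit, flip_bit, is_bit_set and get_bit are assumptions (the file defines every function as `_lowercase`), but the expected values follow directly from the bitwise expressions.

# Assumed names for the helpers above: set_bit, clear_bit, flip_bit, is_bit_set, get_bit.
number = 0b0101  # 5

assert set_bit(number, 1) == 0b0111    # 7: bit 1 turned on
assert clear_bit(number, 2) == 0b0001  # 1: bit 2 turned off
assert flip_bit(number, 0) == 0b0100   # 4: bit 0 toggled
assert is_bit_set(number, 2) is True   # bit 2 of 0b0101 is 1
assert get_bit(number, 1) == 0         # bit 1 of 0b0101 is 0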
691
'''simple docstring'''


def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(lowerCamelCase__ )
    )


def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
    # Base Case
    if index == len(lowerCamelCase__ ):
        return True

    # Recursive Step
    for i in range(lowerCamelCase__ ):
        if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
            # Color current vertex
            _a = i
            # Validate coloring
            if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
                return True
            # Backtrack
            _a = -1
    return False


def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
    _a = [-1] * len(lowerCamelCase__ )
    if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
        return colored_vertices
    return []
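A hypothetical usage sketch of the backtracking graph-coloring entry point above (the last function). The name `color(graph, max_colors)` is an assumption, since the file defines it as `_lowercase`; the graph is an adjacency matrix and the result is one valid assignment or an empty list.

# Assumed name for the last function above: color(graph, max_colors).
graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]

# e.g. [0, 1, 0, 1, 0] -- one valid 3-coloring; [] if no coloring exists
print(color(graph, 3))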
691
1
691
'''simple docstring'''

import heapq as hq
import math
from collections.abc import Iterator


class A :
    def __init__( self , snake_case_ ) -> Optional[int]:
        _a = str(id_ )
        _a = None
        _a = None
        _a = []
        _a = {}  # {vertex:distance}

    def __lt__( self , snake_case_ ) -> Optional[Any]:
        return self.key < other.key

    def __repr__( self ) -> Union[str, Any]:
        return self.id

    def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
        self.neighbors.append(snake_case_ )

    def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
        _a = weight


def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
    graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )


def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
    _a = []
    for u in graph:
        _a = math.inf
        _a = None
    _a = 0
    _a = graph[:]
    while q:
        _a = min(lowerCamelCase__ )
        q.remove(lowerCamelCase__ )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                _a = u
                _a = u.edges[v.id]
    for i in range(1, len(lowerCamelCase__ ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a


def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
    for u in graph:
        _a = math.inf
        _a = None
    _a = 0
    _a = list(lowerCamelCase__ )
    hq.heapify(lowerCamelCase__ )
    while h:
        _a = hq.heappop(lowerCamelCase__ )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                _a = u
                _a = u.edges[v.id]
                hq.heapify(lowerCamelCase__ )
    for i in range(1, len(lowerCamelCase__ ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def _lowercase ( ):
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
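A hypothetical usage sketch of Prim's algorithm as implemented above. The names Vertex, connect and prim are assumptions (the file obfuscates them); vertices are created with 0-based ids, connect takes 1-based positions, and the list-based variant returns (child, parent) pairs of the minimum spanning tree in 1-based form.

# Assumed names for the class and functions above: Vertex, connect, prim.
edges = [(1, 2, 1), (2, 3, 2), (1, 3, 3), (3, 4, 1)]  # (u, v, weight), 1-based positions
graph = [Vertex(i) for i in range(4)]
for a, b, weight in edges:
    connect(graph, a, b, weight)

# list-based variant: MST as (child, parent) pairs rooted at the first vertex
print(prim(graph, graph[0]))  # e.g. [(2, 1), (3, 2), (4, 3)]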
691
1
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( a ): __UpperCAmelCase : Optional[Any] = (DPMSolverSinglestepScheduler,) __UpperCAmelCase : Dict = (("""num_inference_steps""", 25),) def __lowerCAmelCase ( self , **snake_case_ ) -> List[str]: _a = { "num_train_timesteps": 1_0_0_0, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "prediction_type": "epsilon", "thresholding": False, "sample_max_value": 1.0, "algorithm_type": "dpmsolver++", "solver_type": "midpoint", "lambda_min_clipped": -float("inf" ), "variance_type": None, } config.update(**snake_case_ ) return config def __lowerCAmelCase ( self , snake_case_=0 , **snake_case_ ) -> Union[str, Any]: _a = dict(self.forward_default_kwargs ) _a = kwargs.pop("num_inference_steps" , snake_case_ ) _a = self.dummy_sample _a = 0.1 * sample _a = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _a = self.get_scheduler_config(**snake_case_ ) _a = scheduler_class(**snake_case_ ) scheduler.set_timesteps(snake_case_ ) # copy over dummy past residuals _a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case_ ) _a = scheduler_class.from_pretrained(snake_case_ ) new_scheduler.set_timesteps(snake_case_ ) # copy over dummy past residuals _a = dummy_past_residuals[: new_scheduler.config.solver_order] _a , _a = sample, sample for t in range(snake_case_ , time_step + scheduler.config.solver_order + 1 ): _a = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample _a = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self ) -> Tuple: pass def __lowerCAmelCase ( self , snake_case_=0 , **snake_case_ ) -> str: _a = dict(self.forward_default_kwargs ) _a = kwargs.pop("num_inference_steps" , snake_case_ ) _a = self.dummy_sample _a = 0.1 * sample _a = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _a = self.get_scheduler_config() _a = scheduler_class(**snake_case_ ) scheduler.set_timesteps(snake_case_ ) # copy over dummy past residuals (must be after setting timesteps) _a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case_ ) _a = scheduler_class.from_pretrained(snake_case_ ) # copy over dummy past residuals new_scheduler.set_timesteps(snake_case_ ) # copy over dummy past residual (must be after setting timesteps) _a = dummy_past_residuals[: new_scheduler.config.solver_order] _a = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample _a = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , snake_case_=None , **snake_case_ ) -> Union[str, Any]: if scheduler is None: _a = self.scheduler_classes[0] _a = self.get_scheduler_config(**snake_case_ ) _a = scheduler_class(**snake_case_ ) _a = self.scheduler_classes[0] _a = self.get_scheduler_config(**snake_case_ ) _a = scheduler_class(**snake_case_ ) _a = 
1_0 _a = self.dummy_model() _a = self.dummy_sample_deter scheduler.set_timesteps(snake_case_ ) for i, t in enumerate(scheduler.timesteps ): _a = model(snake_case_ , snake_case_ ) _a = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample return sample def __lowerCAmelCase ( self ) -> Tuple: _a = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _a = 5_0 _a = self.dummy_model() _a = self.dummy_sample_deter scheduler.set_timesteps(snake_case_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _a = model(snake_case_ , snake_case_ ) _a = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample _a = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def __lowerCAmelCase ( self ) -> Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=snake_case_ ) def __lowerCAmelCase ( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _a = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _a = self.full_loop(scheduler=snake_case_ ) _a = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 _a = DEISMultistepScheduler.from_config(scheduler.config ) _a = DPMSolverMultistepScheduler.from_config(scheduler.config ) _a = UniPCMultistepScheduler.from_config(scheduler.config ) _a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _a = self.full_loop(scheduler=snake_case_ ) _a = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def __lowerCAmelCase ( self ) -> List[Any]: self.check_over_configs(thresholding=snake_case_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , algorithm_type="dpmsolver++" , solver_order=snake_case_ , solver_type=snake_case_ , ) def __lowerCAmelCase ( self ) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , ) _a = self.full_loop( solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , ) assert not torch.isnan(snake_case_ ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> List[Any]: self.check_over_configs(lower_order_final=snake_case_ ) self.check_over_configs(lower_order_final=snake_case_ ) def __lowerCAmelCase ( self ) -> int: self.check_over_configs(lambda_min_clipped=-float("inf" ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def __lowerCAmelCase ( self ) -> List[str]: self.check_over_configs(variance_type=snake_case_ ) self.check_over_configs(variance_type="learned_range" ) def __lowerCAmelCase ( self ) -> Tuple: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=snake_case_ , time_step=0 ) def __lowerCAmelCase ( self ) -> Tuple: _a = self.full_loop() _a = torch.mean(torch.abs(snake_case_ ) ) assert 
abs(result_mean.item() - 0.2_791 ) < 1E-3 def __lowerCAmelCase ( self ) -> List[str]: _a = self.full_loop(use_karras_sigmas=snake_case_ ) _a = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def __lowerCAmelCase ( self ) -> Tuple: _a = self.full_loop(prediction_type="v_prediction" ) _a = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=snake_case_ ) _a = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def __lowerCAmelCase ( self ) -> Optional[int]: _a = self.scheduler_classes[0] _a = self.get_scheduler_config(thresholding=snake_case_ , dynamic_thresholding_ratio=0 ) _a = scheduler_class(**snake_case_ ) _a = 1_0 _a = self.dummy_model() _a = self.dummy_sample_deter.half() scheduler.set_timesteps(snake_case_ ) for i, t in enumerate(scheduler.timesteps ): _a = model(snake_case_ , snake_case_ ) _a = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample assert sample.dtype == torch.floataa
691
'''simple docstring'''

__snake_case : List[str] = "Tobias Carryer"

from time import time


class A :
    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str:  # noqa: B008
        _a = multiplier
        _a = increment
        _a = modulo
        _a = seed

    def __lowerCAmelCase ( self ) -> str:
        _a = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    __snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
    while True:
        print(lcg.next_number())
691
1
'''simple docstring''' import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow __snake_case : Optional[Any] = False class A ( unittest.TestCase ): def __lowerCAmelCase ( self , snake_case_=3_2 ) -> Dict: set_seed(0 ) _a = UNetaDModel(sample_size=snake_case_ , in_channels=3 , out_channels=3 ) _a = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def __lowerCAmelCase ( self ) -> Any: _a = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _a = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=snake_case_ , ) _a = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=snake_case_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) _a = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(snake_case_ ) for _ in range(4 )] _a = [torch.randn((4, 3, 3_2, 3_2) ).to(snake_case_ ) for _ in range(4 )] _a = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(snake_case_ ) for _ in range(4 )] # train with a DDPM scheduler _a , _a = self.get_model_optimizer(resolution=3_2 ) model.train().to(snake_case_ ) for i in range(4 ): optimizer.zero_grad() _a = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) _a = model(snake_case_ , timesteps[i] ).sample _a = torch.nn.functional.mse_loss(snake_case_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _a , _a = self.get_model_optimizer(resolution=3_2 ) model.train().to(snake_case_ ) for i in range(4 ): optimizer.zero_grad() _a = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) _a = model(snake_case_ , timesteps[i] ).sample _a = torch.nn.functional.mse_loss(snake_case_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
691
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __snake_case : List[str] = logging.get_logger("transformers.models.encodec") __snake_case : Tuple = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } __snake_case : int = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", "encoder.model.15.conv.conv": "encoder.layers.15.conv", } __snake_case : Optional[int] = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } __snake_case : Tuple = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", 
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } __snake_case : int = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } __snake_case : Union[str, Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __snake_case : List[str] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __snake_case : Tuple = [] __snake_case : Optional[int] = [] def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ): for attribute in key.split("." ): _a = getattr(lowerCamelCase__, lowerCamelCase__ ) if weight_type is not None: _a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value elif weight_type == "weight_ih_l0": _a = value elif weight_type == "weight_hh_l0": _a = value elif weight_type == "bias_ih_l0": _a = value elif weight_type == "bias_hh_l0": _a = value elif weight_type == "weight_ih_l1": _a = value elif weight_type == "weight_hh_l1": _a = value elif weight_type == "bias_ih_l1": _a = value elif weight_type == "bias_hh_l1": _a = value else: _a = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ): _a = [] if model_name == "encodec_24khz" or "encodec_32khz": _a = MAPPING_24K elif model_name == "encodec_48khz": _a = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__, lowerCamelCase__ ): logger.info(F'''{name} was ignored''' ) continue _a = False for key, mapped_key in MAPPING.items(): if "*" in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: _a = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue _a = True if "*" in mapped_key: _a = name.split(lowerCamelCase__ )[0].split("." 
)[-2] _a = mapped_key.replace("*", lowerCamelCase__ ) if "weight_g" in name: _a = "weight_g" elif "weight_v" in name: _a = "weight_v" elif "weight_ih_l0" in name: _a = "weight_ih_l0" elif "weight_hh_l0" in name: _a = "weight_hh_l0" elif "bias_ih_l0" in name: _a = "bias_ih_l0" elif "bias_hh_l0" in name: _a = "bias_hh_l0" elif "weight_ih_l1" in name: _a = "weight_ih_l1" elif "weight_hh_l1" in name: _a = "weight_hh_l1" elif "bias_ih_l1" in name: _a = "bias_ih_l1" elif "bias_hh_l1" in name: _a = "bias_hh_l1" elif "bias" in name: _a = "bias" elif "weight" in name: _a = "weight" elif "running_mean" in name: _a = "running_mean" elif "running_var" in name: _a = "running_var" elif "num_batches_tracked" in name: _a = "num_batches_tracked" else: _a = None set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ): if config_path is not None: _a = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: _a = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": _a = [8, 5, 4, 4] _a = [2.2] _a = 64 _a = 32_000 _a = 2_048 _a = False _a = False _a = False elif model_name == "encodec_48khz": _a = [8, 5, 4, 2] _a = [3.0, 6.0, 12.0, 24.0] _a = 48_000 _a = 2 _a = False _a = "time_group_norm" _a = True _a = 1.0 _a = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) _a = EncodecModel(lowerCamelCase__ ) _a = EncodecFeatureExtractor( feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, ) feature_extractor.save_pretrained(lowerCamelCase__ ) _a = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights _a = original_checkpoint["best_state"] recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __snake_case : List[Any] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
691
1
'''simple docstring'''

import string
from math import logaa


def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : str ):
    _a = document.translate(
        str.maketrans("", "", string.punctuation ) ).replace("\n", "" )
    _a = document_without_punctuation.split(" " )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )


def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : str ):
    _a = corpus.lower().translate(
        str.maketrans("", "", string.punctuation ) )  # strip all punctuation and replace it with ''
    _a = corpus_without_punctuation.split("\n" )
    _a = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(lowerCamelCase__ ))


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : Optional[int]=False ):
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + logaa(n / (1 + df) ), 3 )

    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(logaa(n / df ), 3 )


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return round(tf * idf, 3 )
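A hypothetical worked example for the TF-IDF helpers above. The conventional names term_frequency, document_frequency, inverse_document_frequency and tf_idf are assumptions (the file defines them all as `_lowercase`); the numbers follow from the formulas: idf = round(log10(n / df), 3) and tf-idf = round(tf * idf, 3).

# Assumed names for the four helpers above.
corpus = "the cat sat\nthe dog ran\nthe cat ran"

tf = term_frequency("cat", "the cat sat with the other cat")  # -> 2
df, n = document_frequency("cat", corpus)                     # -> (2, 3): "cat" in 2 of 3 docs
idf = inverse_document_frequency(df, n)                       # -> round(log10(3 / 2), 3) = 0.176
print(tf_idf(tf, idf))                                        # -> 0.352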
691
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__snake_case : int = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Union[str, Any] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Dict = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    __snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
691
1
'''simple docstring'''

import math
import unittest


def _lowercase ( lowerCamelCase__ : int ):
    assert isinstance(lowerCamelCase__, lowerCamelCase__ ) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(lowerCamelCase__ ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class A ( unittest.TestCase ):
    def __lowerCAmelCase ( self ) -> int:
        self.assertTrue(is_prime(2 ) )
        self.assertTrue(is_prime(3 ) )
        self.assertTrue(is_prime(5 ) )
        self.assertTrue(is_prime(7 ) )
        self.assertTrue(is_prime(1_1 ) )
        self.assertTrue(is_prime(1_3 ) )
        self.assertTrue(is_prime(1_7 ) )
        self.assertTrue(is_prime(1_9 ) )
        self.assertTrue(is_prime(2_3 ) )
        self.assertTrue(is_prime(2_9 ) )

    def __lowerCAmelCase ( self ) -> List[str]:
        with self.assertRaises(snake_case_ ):
            is_prime(-1_9 )
        self.assertFalse(
            is_prime(0 ) ,
            "Zero doesn't have any positive factors, primes must have exactly two." ,
        )
        self.assertFalse(
            is_prime(1 ) ,
            "One only has 1 positive factor, primes must have exactly two." ,
        )
        self.assertFalse(is_prime(2 * 2 ) )
        self.assertFalse(is_prime(2 * 3 ) )
        self.assertFalse(is_prime(3 * 3 ) )
        self.assertFalse(is_prime(3 * 5 ) )
        self.assertFalse(is_prime(3 * 5 * 7 ) )


if __name__ == "__main__":
    unittest.main()
691
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class A ( metaclass=a ):
    __UpperCAmelCase : int = ["""torch""", """scipy"""]

    def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
        requires_backends(self , ["torch", "scipy"] )

    @classmethod
    def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
        requires_backends(cls , ["torch", "scipy"] )

    @classmethod
    def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
        requires_backends(cls , ["torch", "scipy"] )
691
1
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): __UpperCAmelCase : Dict = 0 __UpperCAmelCase : int = 1 __UpperCAmelCase : List[Any] = 2 @add_end_docstrings(a ) class A ( a ): __UpperCAmelCase : Dict = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ def __init__( self , *snake_case_ , **snake_case_ ) -> Dict: super().__init__(*snake_case_ , **snake_case_ ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. _a = None if self.model.config.prefix is not None: _a = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
_a , _a , _a = self._sanitize_parameters(prefix=snake_case_ , **self._forward_params ) _a = {**self._preprocess_params, **preprocess_params} _a = {**self._forward_params, **forward_params} def __lowerCAmelCase ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ , ) -> Any: _a = {} if prefix is not None: _a = prefix if prefix: _a = self.tokenizer( snake_case_ , padding=snake_case_ , add_special_tokens=snake_case_ , return_tensors=self.framework ) _a = prefix_inputs["input_ids"].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' " [None, 'hole']" ) _a = handle_long_generation preprocess_params.update(snake_case_ ) _a = generate_kwargs _a = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_full_text`" ) if return_tensors is not None: raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" ) _a = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("`return_text` is mutually exclusive with `return_tensors`" ) _a = ReturnType.TENSORS if return_type is not None: _a = return_type if clean_up_tokenization_spaces is not None: _a = clean_up_tokenization_spaces if stop_sequence is not None: _a = self.tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) if len(snake_case_ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." 
) _a = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Optional[Any]: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"add_space_before_punct_symbol": True} ) return super()._parse_and_tokenize(*snake_case_ , **snake_case_ ) def __call__( self , snake_case_ , **snake_case_ ) -> Optional[int]: return super().__call__(snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_="" , snake_case_=None , **snake_case_ ) -> str: _a = self.tokenizer( prefix + prompt_text , padding=snake_case_ , add_special_tokens=snake_case_ , return_tensors=self.framework ) _a = prompt_text if handle_long_generation == "hole": _a = inputs["input_ids"].shape[-1] if "max_new_tokens" in generate_kwargs: _a = generate_kwargs["max_new_tokens"] else: _a = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError("We cannot infer how many new tokens are expected" ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( "We cannot use `hole` to handle this generation the number of desired tokens exceeds the" " models max length" ) _a = inputs["input_ids"][:, -keep_length:] if "attention_mask" in inputs: _a = inputs["attention_mask"][:, -keep_length:] return inputs def __lowerCAmelCase ( self , snake_case_ , **snake_case_ ) -> Any: _a = model_inputs["input_ids"] _a = model_inputs.get("attention_mask" , snake_case_ ) # Allow empty prompts if input_ids.shape[1] == 0: _a = None _a = None _a = 1 else: _a = input_ids.shape[0] _a = model_inputs.pop("prompt_text" ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_a = generate_kwargs.pop("prefix_length" , 0 ) if prefix_length > 0: _a = "max_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].max_new_tokens is not None ) if not has_max_new_tokens: _a = generate_kwargs.get("max_length" ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a = "min_new_tokens" in generate_kwargs or ( "generation_config" in generate_kwargs and generate_kwargs["generation_config"].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a = self.model.generate(input_ids=snake_case_ , attention_mask=snake_case_ , **snake_case_ ) _a = generated_sequence.shape[0] if self.framework == "pt": _a = generated_sequence.reshape(snake_case_ , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a = tf.reshape(snake_case_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowerCAmelCase ( self , snake_case_ , snake_case_=ReturnType.FULL_TEXT , snake_case_=True ) -> Union[str, Any]: _a = model_outputs["generated_sequence"][0] _a = model_outputs["input_ids"] _a = model_outputs["prompt_text"] _a = generated_sequence.numpy().tolist() _a = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a = {"generated_token_ids": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a = self.tokenizer.decode( snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a = 0 else: _a = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ , ) ) if return_type == ReturnType.FULL_TEXT: _a = prompt_text + text[prompt_length:] else: _a = text[prompt_length:] _a = {"generated_text": all_text} records.append(snake_case_ ) return records
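The pipeline implemented above is normally reached through the public transformers factory rather than instantiated directly. A minimal usage sketch (the model name is an arbitrary example, not taken from the file):

from transformers import pipeline

# "gpt2" is an illustrative checkpoint; any causal LM works here.
generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "In 1991, the remains of Russian Tsar Nicholas II",
    max_new_tokens=20,
    return_full_text=False,
)
print(outputs[0]["generated_text"])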
691
'''simple docstring''' __snake_case : Dict = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
691
1
'''simple docstring'''
from __future__ import annotations


class A :

    def __init__( self , snake_case_=None ) -> Any:
        _a = data
        _a = None

    def __repr__( self ) -> Optional[int]:
        _a = []
        _a = self
        while temp:
            string_rep.append(F'''{temp.data}''' )
            _a = temp.next
        return "->".join(snake_case_ )


def _lowercase ( lowerCamelCase__ : list ):
    if not elements_list:
        raise Exception("The Elements List is empty" )

    _a = _a = Node(elements_list[0] )
    for i in range(1, len(lowerCamelCase__ ) ):
        _a = Node(elements_list[i] )
        _a = current.next
    return head


def _lowercase ( lowerCamelCase__ : Node ):
    if head_node is not None and isinstance(lowerCamelCase__, lowerCamelCase__ ):
        print_reverse(head_node.next )
        print(head_node.data )


def _lowercase ( ):
    from doctest import testmod

    testmod()
    _a = make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(lowerCamelCase__ )
    print("Elements in Reverse:" )
    print_reverse(lowerCamelCase__ )


if __name__ == "__main__":
    main()
691
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = ProphetNetTokenizer __UpperCAmelCase : Optional[Any] = False def __lowerCAmelCase ( self ) -> Tuple: super().setUp() _a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def __lowerCAmelCase ( self , snake_case_ ) -> Any: _a = "UNwant\u00E9d,running" _a = "unwanted, running" return input_text, output_text def __lowerCAmelCase ( self ) -> Any: _a = self.tokenizer_class(self.vocab_file ) _a = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def __lowerCAmelCase ( self ) -> List[str]: _a = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> List[Any]: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> int: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __lowerCAmelCase ( self ) -> List[str]: _a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] _a = {} for i, token in enumerate(snake_case_ ): _a = i _a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = ["A long paragraph for summarization.", "Another paragraph for summarization."] _a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2] _a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ ) _a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ ) assert encoded_sentence == text + [1_0_2] assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
691
1
'''simple docstring''' import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A ( a , unittest.TestCase ): __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def __lowerCAmelCase ( self , snake_case_=0 ) -> Dict: _a = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(snake_case_ ) ) _a = np.random.RandomState(snake_case_ ) _a = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def __lowerCAmelCase ( self ) -> Tuple: _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_2_8, 1_2_8, 3) _a = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> int: _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _a = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _a = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) # warmup pass to apply optimizations _a = pipe(**self.get_dummy_inputs() ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _a = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> List[str]: _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _a = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _a = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> int: _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _a = 
EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _a = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __lowerCAmelCase ( self ) -> Any: _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = self.get_dummy_inputs() _a = pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 1_2_8, 1_2_8, 3) _a = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class A ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[str]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Dict: _a = ort.SessionOptions() _a = False return options def __lowerCAmelCase ( self ) -> Optional[Any]: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _a = init_image.resize((7_6_8, 5_1_2) ) # using the PNDM scheduler by default _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = "A fantasy landscape, trending on artstation" _a = np.random.RandomState(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=snake_case_ , output_type="np" , ) _a = output.images _a = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) _a = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __lowerCAmelCase ( self ) -> List[str]: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _a = init_image.resize((7_6_8, 5_1_2) ) _a = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) _a = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case_ ) _a = "A fantasy landscape, trending on artstation" _a = np.random.RandomState(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=snake_case_ , output_type="np" , ) _a = output.images _a = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 7_6_8, 3) _a = 
np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
691
'''simple docstring''' import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _lowercase ( ): _a = argparse.ArgumentParser() parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 ) parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 ) parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 ) parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ ) parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 ) parser.add_argument("--seed", type=lowerCamelCase__, default=0 ) parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" ) parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 ) parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 ) parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" ) return parser.parse_args() __snake_case : str = load("accuracy") def _lowercase ( lowerCamelCase__ : List[str] ): _a , _a = eval_pred _a = np.argmax(lowerCamelCase__, axis=1 ) return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ ) class A ( a ): def __init__( self , snake_case_ ) -> None: super().__init__() _a = trainer def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]: if control.should_evaluate: _a = deepcopy(snake_case_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _lowercase ( ): _a = get_args() set_seed(args.seed ) _a = load_dataset("codeparrot/codecomplex", split="train" ) _a = dataset.train_test_split(test_size=0.2 ) _a = train_test["test"].train_test_split(test_size=0.5 ) _a = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) _a = AutoTokenizer.from_pretrained(args.model_ckpt ) _a = tokenizer.eos_token _a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 ) _a = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _a = False _a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(lowerCamelCase__ : Tuple ): _a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 ) _a = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _a = train_test_validation.map( lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, ) _a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ ) _a = TrainingArguments( output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", 
run_name="complexity-java", report_to="wandb", ) _a = Trainer( model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, ) print("Training..." ) trainer.add_callback(CustomCallback(lowerCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
691
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __snake_case : int = logging.get_logger(__name__) class A ( a ): __UpperCAmelCase : Optional[Any] = ["""input_features""", """attention_mask"""] def __init__( self , snake_case_=8_0 , snake_case_=1_6_0_0_0 , snake_case_=8_0 , snake_case_=0.0 , snake_case_=True , snake_case_=True , snake_case_=True , **snake_case_ , ) -> Dict: super().__init__(feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , **snake_case_ ) _a = num_mel_bins _a = do_ceptral_normalize _a = normalize_means _a = normalize_vars _a = True def __lowerCAmelCase ( self , snake_case_ , ) -> np.ndarray: _a = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers _a = torch.from_numpy(snake_case_ ).unsqueeze(0 ) _a = ta_kaldi.fbank(snake_case_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( snake_case_ , snake_case_ , snake_case_ = True , snake_case_ = True , snake_case_ = 0.0 , ) -> np.ndarray: # make sure we normalize float32 arrays if normalize_means: _a = x[:input_length].mean(axis=0 ) _a = np.subtract(snake_case_ , snake_case_ ) if normalize_vars: _a = x[:input_length].std(axis=0 ) _a = np.divide(snake_case_ , snake_case_ ) if input_length < x.shape[0]: _a = padding_value # make sure array is in float32 _a = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[np.ndarray]: _a = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(snake_case_ , snake_case_ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(snake_case_ , snake_case_ ) ] def __call__( self , snake_case_ , snake_case_ = False , snake_case_ = None , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) _a = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _a = is_batched_numpy or ( isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _a = [np.asarray(snake_case_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case_ , np.ndarray ): _a = np.asarray(snake_case_ , dtype=np.floataa ) elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _a = [raw_speech] # extract fbank features _a = [self._extract_fbank_features(snake_case_ ) for waveform in raw_speech] # convert into correct format for padding _a = BatchFeature({"input_features": features} ) _a = self.pad( snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , ) # make sure list is in array format _a = padded_inputs.get("input_features" ) if isinstance(input_features[0] , snake_case_ ): _a = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in input_features] _a = padded_inputs.get("attention_mask" ) if attention_mask is not None: _a = [np.asarray(snake_case_ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: _a = ( np.array(snake_case_ , dtype=np.intaa ) if self._get_padding_strategies(snake_case_ , max_length=snake_case_ ) is not PaddingStrategy.DO_NOT_PAD else None ) _a = self.normalize( padded_inputs["input_features"] , attention_mask=snake_case_ ) if return_tensors is not None: _a = padded_inputs.convert_to_tensors(snake_case_ ) return padded_inputs
691
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ): _a = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] _a = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } _a = F'''{src_lang}-{tgt_lang}''' _a = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ ) _a = os.path.join(lowerCamelCase__, "README.md" ) print(F'''Generating {path}''' ) with open(lowerCamelCase__, "w", encoding="utf-8" ) as f: f.write(lowerCamelCase__ ) # make sure we are under the root of the project __snake_case : int = Path(__file__).resolve().parent.parent.parent __snake_case : int = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: __snake_case : Any = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
691
1
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[str], lowerCamelCase__ : Dict=1e-12 ): _a = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase__, axis=1 ), a_min=lowerCamelCase__ ) ).T _a = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase__, axis=1 ), a_min=lowerCamelCase__ ) ).T return jnp.matmul(lowerCamelCase__, norm_emb_a.T ) class A ( nn.Module ): __UpperCAmelCase : CLIPConfig __UpperCAmelCase : jnp.dtype = jnp.floataa def __lowerCAmelCase ( self ) -> List[str]: _a = FlaxCLIPVisionModule(self.config.vision_config ) _a = nn.Dense(self.config.projection_dim , use_bias=snake_case_ , dtype=self.dtype ) _a = self.param("concept_embeds" , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) _a = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) _a = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (1_7,) ) _a = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self , snake_case_ ) -> Tuple: _a = self.vision_model(snake_case_ )[1] _a = self.visual_projection(snake_case_ ) _a = jax_cosine_distance(snake_case_ , self.special_care_embeds ) _a = jax_cosine_distance(snake_case_ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs _a = 0.0 _a = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment _a = jnp.round(snake_case_ , 3 ) _a = jnp.any(special_scores > 0 , axis=1 , keepdims=snake_case_ ) # Use a lower threshold if an image has any special care concept _a = is_special_care * 0.01 _a = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment _a = jnp.round(snake_case_ , 3 ) _a = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class A ( a ): __UpperCAmelCase : Dict = CLIPConfig __UpperCAmelCase : Union[str, Any] = """clip_input""" __UpperCAmelCase : str = FlaxStableDiffusionSafetyCheckerModule def __init__( self , snake_case_ , snake_case_ = None , snake_case_ = 0 , snake_case_ = jnp.floataa , snake_case_ = True , **snake_case_ , ) -> Union[str, Any]: if input_shape is None: _a = (1, 2_2_4, 2_2_4, 3) _a = self.module_class(config=snake_case_ , dtype=snake_case_ , **snake_case_ ) super().__init__(snake_case_ , snake_case_ , input_shape=snake_case_ , seed=snake_case_ , dtype=snake_case_ , _do_init=_do_init ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = None ) -> FrozenDict: # init input tensor _a = jax.random.normal(snake_case_ , snake_case_ ) _a , _a = jax.random.split(snake_case_ ) _a = {"params": params_rng, "dropout": dropout_rng} _a = self.module.init(snake_case_ , snake_case_ )["params"] return random_params def __call__( self , snake_case_ , snake_case_ = None , ) -> str: _a = jnp.transpose(snake_case_ , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(snake_case_ , dtype=jnp.floataa ) , rngs={} , )
691
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __snake_case : List[str] = logging.get_logger(__name__) __snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all MVP models at https://huggingface.co/models?filter=mvp __snake_case : str = { "vocab_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json", }, "added_tokens.json": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json", }, "merges_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt", }, "tokenizer_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json", }, } __snake_case : Dict = { "RUCAIBox/mvp": 1024, } class A ( a ): __UpperCAmelCase : int = VOCAB_FILES_NAMES __UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""] __UpperCAmelCase : List[Any] = MvpTokenizer def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]: super().__init__( snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , ) _a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space: _a = getattr(snake_case_ , pre_tok_state.pop("type" ) ) _a = add_prefix_space _a = pre_tok_class(**snake_case_ ) _a = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _a = "post_processor" _a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ ) if tokenizer_component_instance: _a = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _a = tuple(state["sep"] ) if "cls" in state: _a = tuple(state["cls"] ) _a = False if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space: _a = add_prefix_space _a = True if state.get("trim_offsets" , snake_case_ ) != trim_offsets: _a = trim_offsets _a = True if changes_to_apply: _a = getattr(snake_case_ , state.pop("type" ) ) _a = component_class(**snake_case_ ) setattr(self.backend_tokenizer , snake_case_ , snake_case_ ) @property def __lowerCAmelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]: _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value _a = value def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding: _a = kwargs.get("is_split_into_words" , snake_case_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding: _a = kwargs.get("is_split_into_words" , snake_case_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]: _a = self._tokenizer.model.save(snake_case_ , name=snake_case_ ) return tuple(snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]: _a = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
691
1
'''simple docstring'''
from datetime import datetime

import requests


def _lowercase ( lowerCamelCase__ : str ):
    _a = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    _a = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
    return requests.get(lowerCamelCase__ ).content


if __name__ == "__main__":
    __snake_case : Union[str, Any] = input("Enter Video/IGTV url: ").strip()
    __snake_case : Optional[Any] = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
691
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __snake_case : Optional[Any] = 12_8022 __snake_case : List[str] = 12_8028 @require_sentencepiece class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = MaMaaaTokenizer __UpperCAmelCase : int = False __UpperCAmelCase : str = False __UpperCAmelCase : Tuple = True def __lowerCAmelCase ( self ) -> Any: super().setUp() _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] _a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) _a = Path(self.tmpdirname ) save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) _a = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **snake_case_ ) -> str: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Tuple: return ( "This is a test", "This is a test", ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "</s>" _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_tokenizer() _a = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def __lowerCAmelCase ( self ) -> Any: pass def __lowerCAmelCase ( self ) -> Dict: _a = self.get_tokenizer() _a = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , ) _a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) _a = tokenizer.convert_tokens_to_string(snake_case_ ) self.assertEqual(snake_case_ , "This is a test" ) @slow def __lowerCAmelCase ( self ) -> List[Any]: # fmt: off _a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): __UpperCAmelCase : Any = """facebook/m2m100_418M""" __UpperCAmelCase : Dict = [ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] __UpperCAmelCase : Optional[Any] = [ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off __UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def __lowerCAmelCase ( cls ) -> int: _a = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) _a = 1 return cls def __lowerCAmelCase ( self ) -> Any: self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.tokenizer.get_vocab() self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[str]: _a = "en" _a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: self.assertIn(snake_case_ , self.tokenizer.all_special_ids ) # fmt: off _a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on _a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) _a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertNotIn(self.tokenizer.eos_token , snake_case_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = tempfile.mkdtemp() _a = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(snake_case_ ) _a = MaMaaaTokenizer.from_pretrained(snake_case_ ) self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ ) @require_torch def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "en" _a = "fr" _a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" ) _a = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: _a = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) _a = "zh" 
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __lowerCAmelCase ( self ) -> List[Any]: _a = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _a = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __lowerCAmelCase ( self ) -> int: _a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(snake_case_ ) , { # en_XX, A, test, EOS "input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 1_2_8_0_0_6, } , )
691
1
'''simple docstring'''
import socket


def _lowercase ( ):
    _a = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
    _a = socket.gethostname()
    _a = 12_312

    sock.connect((host, port) )
    sock.send(b"Hello server!" )

    with open("Received_file", "wb" ) as out_file:
        print("File opened" )
        print("Receiving data..." )
        while True:
            _a = sock.recv(1_024 )
            if not data:
                break
            out_file.write(lowerCamelCase__ )

    print("Successfully received the file" )
    sock.close()
    print("Connection closed" )


if __name__ == "__main__":
    main()
691
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case : Tuple = logging.get_logger(__name__) __snake_case : int = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class A ( a ): __UpperCAmelCase : Union[str, Any] = """wav2vec2""" def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]: super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # adapter _a = add_adapter _a = adapter_kernel_size _a = adapter_stride _a = num_adapter_layers _a = output_hidden_size or hidden_size _a = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _a = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = xvector_output_dim @property def __lowerCAmelCase ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
691
1
'''simple docstring'''
import requests
from bsa import BeautifulSoup


def _lowercase ( lowerCamelCase__ : str = "https://www.worldometers.info/coronavirus" ):
    _a = BeautifulSoup(requests.get(lowerCamelCase__ ).text, "html.parser" )
    _a = soup.findAll("h1" )
    _a = soup.findAll("div", {"class": "maincounter-number"} )
    keys += soup.findAll("span", {"class": "panel-title"} )
    values += soup.findAll("div", {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(lowerCamelCase__, lowerCamelCase__ )}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
691
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number | (1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number & ~(1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number ^ (1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return ((number >> position) & 1) == 1


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return int((number & (1 << position)) != 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
691
1
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Any ):
    _a = [1]
    for i in range(2, lowerCamelCase__ ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    _a = []
    _a = list(range(lowerCamelCase__ ) )

    # Find permutation
    while factorials:
        _a = factorials.pop()
        _a , _a = divmod(lowerCamelCase__, lowerCamelCase__ )
        permutation.append(elements[number] )
        elements.remove(elements[number] )

    permutation.append(elements[0] )
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
691
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


__snake_case : List[Any] = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
    _a = True
    while ask_again:
        _a = input(lowerCamelCase__ )
        try:
            if default is not None and len(lowerCamelCase__ ) == 0:
                return default
            return convert_value(lowerCamelCase__ ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(lowerCamelCase__ )


def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
    _a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
    _a = menu.run(default_choice=lowerCamelCase__ )
    return convert_value(lowerCamelCase__ ) if convert_value is not None else result


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )


def _lowercase ( lowerCamelCase__ : Dict ):
    _a = int(lowerCamelCase__ )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value


def _lowercase ( lowerCamelCase__ : List[Any] ):
    _a = int(lowerCamelCase__ )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    return {"yes": True, "no": False}[value.lower()]


class A ( argparse.RawDescriptionHelpFormatter ):

    def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
        _a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        _a = usage.replace("<command> [<args>] " , "" )
        return usage
691
1
'''simple docstring''' from manim import * class A ( a ): def __lowerCAmelCase ( self ) -> str: _a = Rectangle(height=0.5 , width=0.5 ) _a = Rectangle(height=0.25 , width=0.25 ) _a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _a = [mem.copy() for i in range(6 )] _a = [mem.copy() for i in range(6 )] _a = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _a = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _a = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) _a = Text("CPU" , font_size=2_4 ) _a = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(snake_case_ ) _a = [mem.copy() for i in range(4 )] _a = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _a = Text("GPU" , font_size=2_4 ) _a = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) gpu.move_to([-1, -1, 0] ) self.add(snake_case_ ) _a = [mem.copy() for i in range(6 )] _a = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _a = Text("Model" , font_size=2_4 ) _a = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) model.move_to([3, -1.0, 0] ) self.add(snake_case_ ) _a = [] _a = [] _a = [] for i, rect in enumerate(snake_case_ ): rect.set_stroke(snake_case_ ) _a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=snake_case_ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case_ , buff=0.0 ) self.add(snake_case_ ) model_cpu_arr.append(snake_case_ ) self.add(*snake_case_ , *snake_case_ , *snake_case_ ) _a = [mem.copy() for i in range(6 )] _a = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _a = Text("Loaded Checkpoint" , font_size=2_4 ) _a = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) checkpoint.move_to([3, 0.5, 0] ) self.add(snake_case_ ) _a = [] _a = [] for i, rect in enumerate(snake_case_ ): _a = fill.copy().set_fill(snake_case_ , opacity=0.7 ) target.move_to(snake_case_ ) ckpt_arr.append(snake_case_ ) _a = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(snake_case_ ) self.add(*snake_case_ , *snake_case_ ) _a = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _a = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(snake_case_ , snake_case_ ) _a = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , ) blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(snake_case_ ) _a = MarkupText( F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) _a = [meta_mem.copy() for i in range(6 )] _a = [meta_mem.copy() for i in range(6 )] _a = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _a = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) _a = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) _a = Text("Disk" , font_size=2_4 ) _a = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , 
aligned_edge=snake_case_ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(snake_case_ , run_time=3 ) , Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) ) _a = [] for i, rect in enumerate(snake_case_ ): _a = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) ) self.play(*snake_case_ ) self.play(FadeOut(snake_case_ ) ) _a = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ , run_time=3 ) ) self.play( FadeOut(snake_case_ , snake_case_ , *snake_case_ , *snake_case_ ) , ) self.wait()
691
'''simple docstring''' def _lowercase ( lowerCamelCase__ : list[list] ): _a = current_set.copy() for row_index, row in enumerate(lowerCamelCase__ ): _a = row[0] for column_index, column in enumerate(lowerCamelCase__ ): if magnitude == 0: _a = column continue _a = column / magnitude # Subtract to cancel term _a = current_set[0] _a = [first_row] _a = current_set[1::] for row in current_set: _a = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowerCamelCase__ ) continue for column_index in range(len(lowerCamelCase__ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowerCamelCase__ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _a = final_set[0] _a = [] _a = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _a = simplify(lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, lowerCamelCase__ ) _a = resultant return final_set def _lowercase ( lowerCamelCase__ : list[list] ): if len(lowerCamelCase__ ) == 0: raise IndexError("solve_simultaneous() requires n lists of length n+1" ) _a = len(lowerCamelCase__ ) + 1 if any(len(lowerCamelCase__ ) != _length for item in equations ): raise IndexError("solve_simultaneous() requires n lists of length n+1" ) for row in equations: if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ): raise ValueError("solve_simultaneous() requires lists of integers" ) if len(lowerCamelCase__ ) == 1: return [equations[0][-1] / equations[0][0]] _a = equations.copy() if any(0 in row for row in data_set ): _a = data_set.copy() _a = [] for row_index, row in enumerate(lowerCamelCase__ ): if 0 not in row: _a = data_set.pop(lowerCamelCase__ ) break if not full_row: raise ValueError("solve_simultaneous() requires at least 1 full equation" ) data_set.insert(0, lowerCamelCase__ ) _a = data_set.copy() _a = simplify(lowerCamelCase__ ) _a = simplified[::-1] _a = [] for row in simplified: _a = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _a = row.copy()[: len(lowerCamelCase__ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowerCamelCase__ ) == 0: solutions.append(0 ) continue _a = temp_row[1::] _a = temp_row[::-1] for column_index, column in enumerate(lowerCamelCase__ ): current_solution -= column * solutions[column_index] solutions.append(lowerCamelCase__ ) _a = [] for item in solutions: final.append(float(round(lowerCamelCase__, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() __snake_case : Tuple = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
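# The file above does Gaussian elimination (simplify) followed by back-substitution.
# A minimal, self-contained sketch of that back-substitution step on a 2x2 system;
# the numbers are illustrative assumptions, not values taken from the file.
upper = [[2.0, 1.0, 4.0], [0.0, 1.5, 3.0]]         # 2x + y = 4, x + 2y = 5 after eliminating x from row 2
y = upper[1][2] / upper[1][1]                      # 3.0 / 1.5 = 2.0
x = (upper[0][2] - upper[0][1] * y) / upper[0][0]  # (4.0 - 1.0 * 2.0) / 2.0 = 1.0
assert (x, y) == (1.0, 2.0)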
691
1
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __snake_case : Dict = logging.get_logger(__name__) __snake_case : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __snake_case : Tuple = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __snake_case : List[Any] = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } __snake_case : Optional[Any] = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } __snake_case : Any = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } __snake_case : List[Any] = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } __snake_case : Any = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } __snake_case : str = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } __snake_case : Optional[int] = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } __snake_case : Optional[Any] = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class A ( a ): __UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES __UpperCAmelCase : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : int = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Any = 
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class A ( a ): __UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES __UpperCAmelCase : int = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : List[str] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __snake_case : int = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) __snake_case : str = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) __snake_case : Dict = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(a ) class A : def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , ) elif titles is None or texts is None: _a = titles if texts is None else texts return super().__call__( snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , ) _a = titles if not isinstance(snake_case_ , snake_case_ ) else [titles] _a = texts if not isinstance(snake_case_ , snake_case_ ) else [texts] _a = len(snake_case_ ) _a = questions if not isinstance(snake_case_ , snake_case_ ) else [questions] * n_passages if len(snake_case_ ) != len(snake_case_ ): raise ValueError( F'''There should be as many titles than texts but got {len(snake_case_ )} titles and {len(snake_case_ )} texts.''' ) _a = super().__call__(snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ )["input_ids"] _a = super().__call__(snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ )["input_ids"] _a = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(snake_case_ , snake_case_ ) ] } if return_attention_mask is not False: _a = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) _a = attention_mask return self.pad(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ ) def __lowerCAmelCase ( self , 
snake_case_ , snake_case_ , snake_case_ = 1_6 , snake_case_ = 6_4 , snake_case_ = 4 , ) -> List[DPRSpanPrediction]: _a = reader_input["input_ids"] _a , _a , _a = reader_output[:3] _a = len(snake_case_ ) _a = sorted(range(snake_case_ ) , reverse=snake_case_ , key=relevance_logits.__getitem__ ) _a = [] for doc_id in sorted_docs: _a = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence _a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: _a = sequence_ids.index(self.pad_token_id ) else: _a = len(snake_case_ ) _a = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case_ , top_spans=snake_case_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case_ , start_index=snake_case_ , end_index=snake_case_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(snake_case_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> List[DPRSpanPrediction]: _a = [] for start_index, start_score in enumerate(snake_case_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) _a = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ ) _a = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' ) _a = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(snake_case_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(a ) class A ( a , a ): __UpperCAmelCase : str = VOCAB_FILES_NAMES __UpperCAmelCase : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Dict = READER_PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase : Any = ["""input_ids""", """attention_mask"""]
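# A hedged usage sketch of the reader-tokenizer interface documented in the docstring
# above, written against the upstream transformers class name (DPRReaderTokenizer)
# rather than the obfuscated identifiers in the snippet; downloading the vocabulary
# requires network access.
from transformers import DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
encodings = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by Haddaway."],
    return_tensors="pt",
)
print(encodings["input_ids"].shape)  # (n_passages, sequence_length)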
691
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing return x.sum() def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing return i + 1 @dataclass class A : __UpperCAmelCase : int __UpperCAmelCase : str class A ( a ): def __lowerCAmelCase ( self ) -> Tuple: _a = {} _a = [] _a = 1 _a = [1, 2] _a = {"a": 1, "b": 2} _a = {"a": [1, 2], "b": [3, 4]} _a = {"a": {"1": 1}, "b": 2} _a = {"a": 1, "b": 2, "c": 3, "d": 4} _a = {} _a = [] _a = 2 _a = [2, 3] _a = {"a": 2, "b": 3} _a = {"a": [2, 3], "b": [4, 5]} _a = {"a": {"1": 2}, "b": 3} _a = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) _a = 2 self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) _a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} _a = {"a": 2, "b": 0, "c": 2} _a = { "a": np.eye(2 ).astype(snake_case_ ), "b": np.zeros(3 ).astype(snake_case_ ), "c": np.ones(2 ).astype(snake_case_ ), } self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(snake_case_ ): # can't pickle a local lambda map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ ) def __lowerCAmelCase ( self ) -> Any: _a = {"a": 1, "b": 2} _a = {"a": 3, "b": 4} _a = {"a": 5, "b": 6} _a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(snake_case_ , 
snake_case_ , snake_case_ ) ) , snake_case_ ) def __lowerCAmelCase ( self ) -> str: class A : __UpperCAmelCase : Optional[int] = """bar""" _a = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(snake_case_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc", [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ): with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: _a = {F'''{i}''': i for i in range(lowerCamelCase__ )} _a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A ( a ): @require_tf def __lowerCAmelCase ( self ) -> Any: import tensorflow as tf from tensorflow.keras import layers _a = layers.Dense(2 ) def gen_random_output(): _a = tf.random.uniform((1, 3) ) return model(snake_case_ ).numpy() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: import torch def gen_random_output(): _a = torch.nn.Linear(3 , 2 ) _a = torch.rand(1 , 3 ) return model(snake_case_ ).detach().numpy() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __lowerCAmelCase ( self ) -> Optional[int]: def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(4_2 ): _a = gen_random_output() with temp_seed(4_2 ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data", [{}] ) def _lowercase ( lowerCamelCase__ : Any ): _a = NestedDataStructure(lowerCamelCase__ ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output", [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ], ) def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ): _a = 
NestedDataStructure(lowerCamelCase__ ).flatten() assert output == expected_output def _lowercase ( ): _a = A(x=1, y="foobar" ) _a = {"x": 1, "y": "foobar"} assert asdict(lowerCamelCase__ ) == expected_output _a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]} _a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(lowerCamelCase__ ) == expected_output with pytest.raises(lowerCamelCase__ ): asdict([1, A(x=10, y="foo" )] ) def _lowercase ( lowerCamelCase__ : str ): return text.split() def _lowercase ( lowerCamelCase__ : List[Any] ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _lowercase ( ): with Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _a = [] for yield_time, content in iflatmap_unordered( lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(lowerCamelCase__ ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(lowerCamelCase__ ) == 4
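# map_nested, exercised by the tests above, applies a function across arbitrarily
# nested lists and dicts. A small hedged sketch; datasets.utils.py_utils is an
# internal module, so this import path simply mirrors the test file's own import.
from datasets.utils.py_utils import map_nested

print(map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}))
# expected: {'a': [2, 3], 'b': {'c': 4}}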
691
1
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class A ( a ): def __init__( self , snake_case_=0.01 , snake_case_=1_0_0_0 ) -> int: _a = p_stop _a = max_length def __iter__( self ) -> Optional[Any]: _a = 0 _a = False while not stop and count < self.max_length: yield count count += 1 _a = random.random() < self.p_stop class A ( unittest.TestCase ): def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=False , snake_case_=True ) -> Any: _a = [ BatchSamplerShard(snake_case_ , 2 , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) for i in range(2 ) ] _a = [list(snake_case_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(snake_case_ ) for shard in batch_sampler_shards] , [len(snake_case_ ) for e in expected] ) self.assertListEqual(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> int: # Check the shards when the dataset is a round multiple of total batch size. _a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) _a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case_ , snake_case_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) _a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) _a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) _a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) # Check the shards when the dataset is very small. 
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) _a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [[], []] self.check_batch_sampler_shards(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> List[str]: # Check the shards when the dataset is a round multiple of batch size. _a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) _a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size. _a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) _a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) _a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) # Check the shards when the dataset is very small. _a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) _a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [[], []] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ ) def __lowerCAmelCase ( self ) -> Any: # Check the shards when the dataset is a round multiple of total batch size. _a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. 
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ ) _a = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) # Check the shards when the dataset is very small. _a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [[[0, 1]], []] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ ) _a = [[], []] self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: # Check the shards when the dataset is a round multiple of batch size. _a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ ) # Expected shouldn't change self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size. 
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ ) _a = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) # Check the shards when the dataset is very small. _a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [[[0, 1]], []] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) _a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ ) _a = [[], []] self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]] _a = [BatchSamplerShard(snake_case_ , 2 , snake_case_ , even_batches=snake_case_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=2 , snake_case_=False ) -> Tuple: random.seed(snake_case_ ) _a = list(snake_case_ ) _a = [ IterableDatasetShard( snake_case_ , batch_size=snake_case_ , drop_last=snake_case_ , num_processes=snake_case_ , process_index=snake_case_ , split_batches=snake_case_ , ) for i in range(snake_case_ ) ] _a = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(snake_case_ ) iterable_dataset_lists.append(list(snake_case_ ) ) _a = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _a = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(snake_case_ ) , len(snake_case_ ) ) self.assertTrue(len(snake_case_ ) % shard_batch_size == 0 ) _a = [] for idx in range(0 , len(snake_case_ ) , snake_case_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(snake_case_ ) < len(snake_case_ ): reference += reference self.assertListEqual(snake_case_ , reference[: len(snake_case_ )] ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = 4_2 _a = RandomIterableDataset() self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) # Edge case with a very small dataset _a = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=snake_case_ ) _a = SkipBatchSampler(snake_case_ , 2 ) self.assertListEqual(list(snake_case_ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def __lowerCAmelCase ( self ) -> Any: _a = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = DataLoader(list(range(1_6 ) ) , batch_size=4 ) _a = skip_first_batches(snake_case_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 ) for idx, _ in enumerate(snake_case_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(snake_case_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def __lowerCAmelCase ( self ) -> str: Accelerator() _a = DataLoaderDispatcher(range(1_6 ) , batch_size=4 ) for idx, _ in enumerate(snake_case_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(snake_case_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
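# A hedged sketch of the sharding behaviour verified above: with two processes,
# each BatchSamplerShard yields every other batch of the wrapped BatchSampler.
# Keyword names (num_processes, process_index) follow the accelerate API; torch
# and accelerate must be installed.
from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, num_processes=2, process_index=i) for i in range(2)]
print(list(shards[0]))  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
print(list(shards[1]))  # [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]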
691
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(a ) class A ( a ): __UpperCAmelCase : Dict = """rag""" __UpperCAmelCase : Dict = True def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]: super().__init__( bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _a = kwargs.pop("question_encoder" ) _a = question_encoder_config.pop("model_type" ) _a = kwargs.pop("generator" ) _a = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = reduce_loss _a = label_smoothing _a = exclude_bos_score _a = do_marginalize _a = title_sep _a = doc_sep _a = n_docs _a = max_combined_length _a = dataset _a = dataset_split _a = index_name _a = retrieval_vector_size _a = retrieval_batch_size _a = passages_path _a = index_path _a = use_dummy_dataset _a = output_retrieved _a = do_deduplication _a = use_cache if self.forced_eos_token_id is None: _a = getattr(self.generator , "forced_eos_token_id" , snake_case_ ) @classmethod def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = copy.deepcopy(self.__dict__ ) _a = self.question_encoder.to_dict() _a = self.generator.to_dict() _a = self.__class__.model_type return output
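# A hedged sketch of building the composite configuration described above, using the
# upstream transformers names (RagConfig, AutoConfig, from_question_encoder_generator_configs)
# rather than the obfuscated identifiers in the snippet; fetching the two base configs
# requires network access.
from transformers import AutoConfig, RagConfig

question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder, generator, n_docs=5, retrieval_vector_size=768
)
print(rag_config.index_name)  # "compressed" by default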
691
1
'''simple docstring''' import torch from torch import nn class A ( nn.Module ): def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=1 , snake_case_=False ) -> List[Any]: super().__init__() _a = n_token _a = d_embed _a = d_proj _a = cutoffs + [n_token] _a = [0] + self.cutoffs _a = div_val _a = self.cutoffs[0] _a = len(self.cutoffs ) - 1 _a = self.shortlist_size + self.n_clusters if self.n_clusters > 0: _a = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) _a = nn.Parameter(torch.zeros(self.n_clusters ) ) _a = nn.ModuleList() _a = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case_ , snake_case_ ) ) ) else: self.out_projs.append(snake_case_ ) self.out_layers.append(nn.Linear(snake_case_ , snake_case_ ) ) else: for i in range(len(self.cutoffs ) ): _a , _a = self.cutoff_ends[i], self.cutoff_ends[i + 1] _a = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case_ , snake_case_ ) ) ) self.out_layers.append(nn.Linear(snake_case_ , r_idx - l_idx ) ) _a = keep_order def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict: if proj is None: _a = nn.functional.linear(snake_case_ , snake_case_ , bias=snake_case_ ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: _a = nn.functional.linear(snake_case_ , proj.t().contiguous() ) _a = nn.functional.linear(snake_case_ , snake_case_ , bias=snake_case_ ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def __lowerCAmelCase ( self , snake_case_ , snake_case_=None , snake_case_=False ) -> int: if labels is not None: # Shift so that tokens < n predict n _a = hidden[..., :-1, :].contiguous() _a = labels[..., 1:].contiguous() _a = hidden.view(-1 , hidden.size(-1 ) ) _a = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError("Input and labels should have the same size in the batch dimension." 
) else: _a = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: _a = self._compute_logit(snake_case_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: _a = labels != -1_0_0 _a = torch.zeros_like(snake_case_ , dtype=hidden.dtype , device=hidden.device ) _a = ( -nn.functional.log_softmax(snake_case_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: _a = nn.functional.log_softmax(snake_case_ , dim=-1 ) else: # construct weights and biases _a , _a = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: _a , _a = self.cutoff_ends[i], self.cutoff_ends[i + 1] _a = self.out_layers[0].weight[l_idx:r_idx] _a = self.out_layers[0].bias[l_idx:r_idx] else: _a = self.out_layers[i].weight _a = self.out_layers[i].bias if i == 0: _a = torch.cat([weight_i, self.cluster_weight] , dim=0 ) _a = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(snake_case_ ) biases.append(snake_case_ ) _a , _a , _a = weights[0], biases[0], self.out_projs[0] _a = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _a = nn.functional.log_softmax(snake_case_ , dim=1 ) if labels is None: _a = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: _a = torch.zeros_like(snake_case_ , dtype=hidden.dtype , device=hidden.device ) _a = 0 _a = [0] + self.cutoffs for i in range(len(snake_case_ ) - 1 ): _a , _a = cutoff_values[i], cutoff_values[i + 1] if labels is not None: _a = (labels >= l_idx) & (labels < r_idx) _a = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue _a = labels.index_select(0 , snake_case_ ) - l_idx _a = head_logprob.index_select(0 , snake_case_ ) _a = hidden.index_select(0 , snake_case_ ) else: _a = hidden if i == 0: if labels is not None: _a = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: _a = head_logprob[:, : self.cutoffs[0]] else: _a , _a , _a = weights[i], biases[i], self.out_projs[i] _a = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _a = nn.functional.log_softmax(snake_case_ , dim=1 ) _a = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: _a = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: _a = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i _a = logprob_i if labels is not None: if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order: out.index_copy_(0 , snake_case_ , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: if self.n_clusters == 0: _a = self._compute_logit(snake_case_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(snake_case_ , dim=-1 ) else: # construct weights and biases _a , _a = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: _a , _a = self.cutoff_ends[i], self.cutoff_ends[i + 1] _a = self.out_layers[0].weight[l_idx:r_idx] _a = self.out_layers[0].bias[l_idx:r_idx] else: _a = self.out_layers[i].weight _a = self.out_layers[i].bias if i == 0: _a = torch.cat([weight_i, self.cluster_weight] , dim=0 ) _a = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(snake_case_ ) biases.append(snake_case_ ) _a , _a , _a = weights[0], biases[0], self.out_projs[0] _a = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _a = hidden.new_empty((head_logit.size(0 ), 
self.n_token) ) _a = nn.functional.log_softmax(snake_case_ , dim=1 ) _a = [0] + self.cutoffs for i in range(len(snake_case_ ) - 1 ): _a , _a = cutoff_values[i], cutoff_values[i + 1] if i == 0: _a = head_logprob[:, : self.cutoffs[0]] else: _a , _a , _a = weights[i], biases[i], self.out_projs[i] _a = self._compute_logit(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) _a = nn.functional.log_softmax(snake_case_ , dim=1 ) _a = head_logprob[:, -i] + tail_logprob_i _a = logprob_i return out
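# The module above is a projected adaptive (clustered) log-softmax. PyTorch ships a
# reference implementation of the same idea; a hedged sketch with made-up sizes
# (64-dim hidden states, a 1000-word vocabulary, two cutoffs), not parameters taken
# from the snippet.
import torch
import torch.nn as nn

adaptive = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[100, 500])
hidden = torch.randn(8, 64)
targets = torch.randint(0, 1000, (8,))
result = adaptive(hidden, targets)
print(result.loss)  # mean negative log-likelihood over the 8 positions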
691
'''simple docstring''' class A : def __init__( self ) -> List[str]: _a = 0 _a = 0 _a = {} def __lowerCAmelCase ( self , snake_case_ ) -> int: if vertex not in self.adjacency: _a = {} self.num_vertices += 1 def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]: self.add_vertex(snake_case_ ) self.add_vertex(snake_case_ ) if head == tail: return _a = weight _a = weight def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.get_edges() for edge in edges: _a , _a , _a = edge edges.remove((tail, head, weight) ) for i in range(len(snake_case_ ) ): _a = list(edges[i] ) edges.sort(key=lambda snake_case_ : e[2] ) for i in range(len(snake_case_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: _a = edges[i][2] + 1 for edge in edges: _a , _a , _a = edge _a = weight _a = weight def __str__( self ) -> Optional[int]: _a = "" for tail in self.adjacency: for head in self.adjacency[tail]: _a = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip("\n" ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __lowerCAmelCase ( self ) -> Any: return self.adjacency.keys() @staticmethod def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any: _a = Graph() if vertices is None: _a = [] if edges is None: _a = [] for vertex in vertices: g.add_vertex(snake_case_ ) for edge in edges: g.add_edge(*snake_case_ ) return g class A : def __init__( self ) -> Optional[int]: _a = {} _a = {} def __len__( self ) -> List[Any]: return len(self.parent ) def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: if item in self.parent: return self.find(snake_case_ ) _a = item _a = 0 return item def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]: if item not in self.parent: return self.make_set(snake_case_ ) if item != self.parent[item]: _a = self.find(self.parent[item] ) return self.parent[item] def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]: _a = self.find(snake_case_ ) _a = self.find(snake_case_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _a = roota return roota if self.rank[roota] < self.rank[roota]: _a = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _a = roota return roota return None @staticmethod def __lowerCAmelCase ( snake_case_ ) -> Tuple: _a = graph.num_vertices _a = Graph.UnionFind() _a = [] while num_components > 1: _a = {} for vertex in graph.get_vertices(): _a = -1 _a = graph.get_edges() for edge in edges: _a , _a , _a = edge edges.remove((tail, head, weight) ) for edge in edges: _a , _a , _a = edge _a = union_find.find(snake_case_ ) _a = union_find.find(snake_case_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _a = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _a = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _a , _a , _a = cheap_edge[vertex] if union_find.find(snake_case_ ) != union_find.find(snake_case_ ): union_find.union(snake_case_ , snake_case_ ) mst_edges.append(cheap_edge[vertex] ) _a = num_components - 1 _a = Graph.build(edges=snake_case_ ) return mst
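# The class above implements Boruvka's minimum-spanning-tree algorithm on top of a
# union-find. For a quick cross-check of the result, the same computation via
# networkx (an extra dependency assumed here, not used by the snippet itself):
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2), (1, 3, 3), (3, 4, 1)])
mst = nx.minimum_spanning_tree(G, algorithm="boruvka")
print(sorted(mst.edges(data="weight")))  # [(1, 2, 1), (2, 3, 2), (3, 4, 1)]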
691
1
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class A ( a ):
    @staticmethod
    @abstractmethod
    def __lowerCAmelCase ( snake_case_ ) -> str:
        raise NotImplementedError()

    @abstractmethod
    def __lowerCAmelCase ( self ) -> List[str]:
        raise NotImplementedError()
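# A hedged sketch of how a command ABC like the one above is typically consumed: a
# concrete subclass registers its own argparse subparser and implements run(). All
# names here (BaseCommand, EchoCommand, register_subcommand, run) are illustrative
# stand-ins, not the obfuscated identifiers from the snippet.
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class EchoCommand(BaseCommand):
    def __init__(self, text):
        self.text = text

    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: EchoCommand(args.text))

    def run(self):
        print(self.text)


parser = ArgumentParser("demo-cli")
subcommands = parser.add_subparsers()
EchoCommand.register_subcommand(subcommands)
args = parser.parse_args(["echo", "hello"])
args.func(args).run()  # prints "hello"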
691
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __snake_case : Tuple = "\\n Text data.\n Second line of data." __snake_case : int = "file" @pytest.fixture(scope="session" ) def _lowercase ( lowerCamelCase__ : Optional[Any] ): _a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") _a = bytes(lowerCamelCase__, "utf-8" ) with zstd.open(lowerCamelCase__, "wb" ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture def _lowercase ( lowerCamelCase__ : int ): with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f: f.write(lowerCamelCase__ ) return FILE_PATH @pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] ) def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ): _a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} _a = input_paths[compression_format] _a = tmp_path / "cache" _a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ ) _a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ ) with open(lowerCamelCase__ ) as f: _a = f.read() with open(lowerCamelCase__ ) as f: _a = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted", [True, False] ) @pytest.mark.parametrize("default_cache_dir", [True, False] ) def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ): _a = "custom_cache" _a = "custom_extracted_dir" _a = tmp_path / "custom_extracted_path" if default_extracted: _a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) ) _a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _a = xz_file _a = ( DownloadConfig(extract_compressed_file=lowerCamelCase__ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ ) ) _a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ ) assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected def _lowercase ( lowerCamelCase__ : Union[str, Any] ): # absolute path _a = str(Path(lowerCamelCase__ ).resolve() ) assert cached_path(lowerCamelCase__ ) == text_file # relative path _a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(lowerCamelCase__ ) == text_file def _lowercase ( lowerCamelCase__ : Dict ): # absolute path _a = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(lowerCamelCase__ ): cached_path(lowerCamelCase__ ) # relative path _a = "./__missing_file__.txt" with pytest.raises(lowerCamelCase__ ): cached_path(lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = get_from_cache(F'''tmp://{tmpfs_file}''' ) with open(lowerCamelCase__ ) as f: _a = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE", 
lowerCamelCase__ ) def _lowercase ( ): with pytest.raises(lowerCamelCase__ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): http_get("https://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Union[str, Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ ) def _lowercase ( lowerCamelCase__ : Optional[Any] ): _a = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase__ ): fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ ) with pytest.raises(lowerCamelCase__ ): fsspec_head("s3://huggingface.co" )
691
1
'''simple docstring''' import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) __snake_case : Optional[int] = logging.getLogger(__name__) class A ( a ): def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None ) -> Optional[Any]: _a = self.layer[current_layer](snake_case_ , snake_case_ , head_mask[current_layer] ) _a = layer_outputs[0] return hidden_states @add_start_docstrings( """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , a , ) class A ( a ): def __init__( self , snake_case_ ) -> Union[str, Any]: super().__init__(snake_case_ ) _a = BertEncoderWithPabee(snake_case_ ) self.init_weights() _a = 0 _a = 0 _a = 0 _a = 0 def __lowerCAmelCase ( self , snake_case_ ) -> Union[str, Any]: _a = threshold def __lowerCAmelCase ( self , snake_case_ ) -> int: _a = patience def __lowerCAmelCase ( self ) -> str: _a = 0 _a = 0 def __lowerCAmelCase ( self ) -> Tuple: _a = self.inference_layers_num / self.inference_instances_num _a = ( F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =''' F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***''' ) print(snake_case_ ) @add_start_docstrings_to_model_forward(snake_case_ ) def __lowerCAmelCase ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=False , ) -> List[str]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: _a = input_ids.size() elif inputs_embeds is not None: _a = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) _a = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _a = torch.ones(snake_case_ , device=snake_case_ ) if token_type_ids is None: _a = torch.zeros(snake_case_ , dtype=torch.long , device=snake_case_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
_a = self.get_extended_attention_mask(snake_case_ , snake_case_ , snake_case_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: _a , _a , _a = encoder_hidden_states.size() _a = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: _a = torch.ones(snake_case_ , device=snake_case_ ) _a = self.invert_attention_mask(snake_case_ ) else: _a = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _a = self.get_head_mask(snake_case_ , self.config.num_hidden_layers ) _a = self.embeddings( input_ids=snake_case_ , position_ids=snake_case_ , token_type_ids=snake_case_ , inputs_embeds=snake_case_ ) _a = embedding_output if self.training: _a = [] for i in range(self.config.num_hidden_layers ): _a = self.encoder.adaptive_forward( snake_case_ , current_layer=snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ ) _a = self.pooler(snake_case_ ) _a = output_layers[i](output_dropout(snake_case_ ) ) res.append(snake_case_ ) elif self.patience == 0: # Use all layers for inference _a = self.encoder( snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , ) _a = self.pooler(encoder_outputs[0] ) _a = [output_layers[self.config.num_hidden_layers - 1](snake_case_ )] else: _a = 0 _a = None _a = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 _a = self.encoder.adaptive_forward( snake_case_ , current_layer=snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ ) _a = self.pooler(snake_case_ ) _a = output_layers[i](snake_case_ ) if regression: _a = logits.detach() if patient_result is not None: _a = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: _a = 0 else: _a = logits.detach().argmax(dim=1 ) if patient_result is not None: _a = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(snake_case_ ) ): patient_counter += 1 else: _a = 0 _a = logits if patient_counter == self.patience: break _a = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""" , a , ) class A ( a ): def __init__( self , snake_case_ ) -> Tuple: super().__init__(snake_case_ ) _a = config.num_labels _a = BertModelWithPabee(snake_case_ ) _a = nn.Dropout(config.hidden_dropout_prob ) _a = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(snake_case_ ) def __lowerCAmelCase ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ) -> List[str]: _a = self.bert( input_ids=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , position_ids=snake_case_ , head_mask=snake_case_ , inputs_embeds=snake_case_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) _a = (logits[-1],) if labels is not None: _a = None _a = 0 for ix, logits_item in enumerate(snake_case_ ): if self.num_labels == 1: # We are doing regression _a = MSELoss() _a = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: _a = CrossEntropyLoss() _a = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: _a = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 _a = (total_loss / total_weights,) + outputs return outputs
691
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __snake_case : Union[str, Any] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def _lowercase ( lowerCamelCase__ : List[Any] ): _a = {} state_dict.pop("pixel_mean", lowerCamelCase__ ) state_dict.pop("pixel_std", lowerCamelCase__ ) _a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _a = key.replace(lowerCamelCase__, lowerCamelCase__ ) if re.match(lowerCamelCase__, lowerCamelCase__ ): _a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) ) if layer_nb == 0: _a = key.replace("layers.0", "proj_in" ) elif layer_nb == 1: _a = key.replace("layers.1", "layers.0" ) elif layer_nb == 2: _a = key.replace("layers.2", "proj_out" ) _a = value _a = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ): _a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: _a = SamConfig() elif "sam_vit_l" in model_name: _a = SamVisionConfig( hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) _a = SamConfig( vision_config=lowerCamelCase__, ) elif "sam_vit_h" in model_name: _a = SamVisionConfig( hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) _a = SamConfig( vision_config=lowerCamelCase__, ) _a = torch.load(lowerCamelCase__, map_location="cpu" ) _a = replace_keys(lowerCamelCase__ ) _a = SamImageProcessor() _a = SamProcessor(image_processor=lowerCamelCase__ ) _a = SamModel(lowerCamelCase__ ) hf_model.load_state_dict(lowerCamelCase__ ) _a = hf_model.to("cuda" ) _a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" _a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" ) _a = [[[400, 650]]] _a = [[1]] _a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 _a = processor( 
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 _a = ((75, 275, 1_725, 850),) _a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. _a = [[[400, 650], [800, 650]]] _a = [[1, 1]] _a = processor( images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" ) with torch.no_grad(): _a = hf_model(**lowerCamelCase__ ) _a = output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": __snake_case : Union[str, Any] = argparse.ArgumentParser() __snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) __snake_case : str = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
691
1
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(lowerCamelCase__, int(b / 2 ) ) * actual_power(lowerCamelCase__, int(b / 2 ) )
    else:
        return a * actual_power(lowerCamelCase__, int(b / 2 ) ) * actual_power(lowerCamelCase__, int(b / 2 ) )


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    if b < 0:
        return 1 / actual_power(lowerCamelCase__, lowerCamelCase__ )
    return actual_power(lowerCamelCase__, lowerCamelCase__ )


if __name__ == "__main__":
    print(power(-2, -3))
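# Editor's sketch (not part of the dataset row above): a hedged usage example for the
# divide-and-conquer exponentiation module. It assumes the intended public names are
# `actual_power` and `power`, as the `__main__` guard's call to `power(-2, -3)` suggests;
# the obfuscated `_lowercase` aliases above stand in for them.
assert power(2, 3) == 8        # repeated squaring: 2 * (2 ** 1) ** 2
assert power(2, -1) == 0.5     # a negative exponent yields 1 / a ** |b|
assert power(-2, -3) == -0.125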
691
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase__ : List[Any] ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _a = [] for i in range(lowerCamelCase__ ): _a = i / num_diffusion_timesteps _a = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) ) return torch.tensor(lowerCamelCase__, dtype=torch.floataa ) class A ( a , a ): __UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers] __UpperCAmelCase : Optional[int] = 2 @register_to_config def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]: if trained_betas is not None: _a = torch.tensor(snake_case_ , dtype=torch.floataa ) elif beta_schedule == "linear": _a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _a = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _a = betas_for_alpha_bar(snake_case_ ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) _a = 1.0 - self.betas _a = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(snake_case_ , snake_case_ , snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict: if schedule_timesteps is None: _a = self.timesteps _a = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _a = 1 if len(snake_case_ ) > 1 else 0 else: _a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep _a = self._index_counter[timestep_int] return indices[pos].item() @property def __lowerCAmelCase ( self ) -> Dict: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor: _a = self.index_for_timestep(snake_case_ ) if self.state_in_first_order: _a = self.sigmas[step_index] else: _a = self.sigmas_interpol[step_index] _a = sample / ((sigma**2 + 1) ** 0.5) return sample def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]: _a = num_inference_steps _a = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy() elif self.config.timestep_spacing == "leading": _a = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _a = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) _a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ ) _a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ ) _a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _a = torch.from_numpy(snake_case_ ).to(device=snake_case_ ) # interpolate sigmas _a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() _a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) _a = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(snake_case_ ).startswith("mps" ): # mps does not support float64 _a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa ) else: _a = torch.from_numpy(snake_case_ ).to(snake_case_ ) # interpolate timesteps _a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype ) _a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() _a = torch.cat([timesteps[:1], interleaved_timesteps] ) _a = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _a = defaultdict(snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]: # get log sigma _a = sigma.log() # get distribution _a = log_sigma - self.log_sigmas[:, None] # get sigmas range _a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) _a = low_idx + 1 _a = self.log_sigmas[low_idx] _a = self.log_sigmas[high_idx] # interpolate sigmas _a = (low - log_sigma) / (low - high) _a = w.clamp(0 , 1 ) # transform interpolation to time range _a = (1 - w) * low_idx + w * high_idx _a = t.view(sigma.shape ) return t @property def __lowerCAmelCase ( self ) -> List[Any]: return self.sample is None def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]: _a = self.index_for_timestep(snake_case_ ) # advance index counter by 1 _a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _a = self.sigmas[step_index] _a = self.sigmas_interpol[step_index + 1] _a = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _a = self.sigmas[step_index - 1] _a = self.sigmas_interpol[step_index] _a = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _a = 0 _a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _a = sigma_hat if self.state_in_first_order else sigma_interpol _a = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _a = sigma_hat if self.state_in_first_order else sigma_interpol _a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("prediction_type not implemented yet: sample" ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. 
Convert to an ODE derivative for 1st order _a = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _a = sigma_interpol - sigma_hat # store for 2nd order step _a = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _a = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _a = sigma_next - sigma_hat _a = self.sample _a = None _a = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples _a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ): # mps does not support float64 _a = self.timesteps.to(original_samples.device , dtype=torch.floataa ) _a = timesteps.to(original_samples.device , dtype=torch.floataa ) else: _a = self.timesteps.to(original_samples.device ) _a = timesteps.to(original_samples.device ) _a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps] _a = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _a = sigma.unsqueeze(-1 ) _a = original_samples + noise * sigma return noisy_samples def __len__( self ) -> str: return self.config.num_train_timesteps
691
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


__snake_case : Optional[Any] = logging.get_logger(__name__)


class A ( a ):
    def __init__( self , *snake_case_ , **snake_case_ ) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead." , snake_case_ , )
        super().__init__(*snake_case_ , **snake_case_ )
691
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(lowerCamelCase__ ) )


def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
    # Base Case
    if index == len(lowerCamelCase__ ):
        return True

    # Recursive Step
    for i in range(lowerCamelCase__ ):
        if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
            # Color current vertex
            _a = i
            # Validate coloring
            if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
                return True
            # Backtrack
            _a = -1
    return False


def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
    _a = [-1] * len(lowerCamelCase__ )
    if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
        return colored_vertices
    return []
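# Editor's sketch (not part of the dataset row above): a hedged example of driving the
# backtracking graph-colouring helpers. It assumes the three `_lowercase` definitions
# correspond to the `valid_coloring` / `util_color` / `color` functions their bodies call,
# and that the graph is an adjacency matrix with `max_colors` as the second argument.
graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, 2))  # a valid 2-colouring such as [0, 1, 0, 1, 0]; [] if none exists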
691
1
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list ):
    if not isinstance(lowerCamelCase__, lowerCamelCase__ ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(lowerCamelCase__ ) == 0:
        raise ValueError("Input list must be a non empty list" )
    if len(lowerCamelCase__ ) == 1:
        return True
    _a = series[1] - series[0]
    for index in range(len(lowerCamelCase__ ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def _lowercase ( lowerCamelCase__ : list ):
    if not isinstance(lowerCamelCase__, lowerCamelCase__ ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(lowerCamelCase__ ) == 0:
        raise ValueError("Input list must be a non empty list" )
    _a = 0
    for val in series:
        answer += val
    return answer / len(lowerCamelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
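# Editor's sketch (not part of the dataset row above): hedged examples for the two series
# helpers. It assumes the first `_lowercase` definition is the arithmetic-series check and
# the second is the arithmetic mean, as their bodies (common-difference test, sum / len)
# indicate; the names `is_arithmetic_series` and `arithmetic_mean` are assumptions.
print(is_arithmetic_series([2, 4, 6]))  # True, common difference 2
print(is_arithmetic_series([2, 4, 7]))  # False
print(arithmetic_mean([2, 4, 6]))       # 4.0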
691
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator


class A :
    def __init__( self , snake_case_ ) -> Optional[int]:
        _a = str(id_ )
        _a = None
        _a = None
        _a = []
        _a = {}  # {vertex:distance}

    def __lt__( self , snake_case_ ) -> Optional[Any]:
        return self.key < other.key

    def __repr__( self ) -> Union[str, Any]:
        return self.id

    def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
        self.neighbors.append(snake_case_ )

    def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
        _a = weight


def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
    graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )


def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
    _a = []
    for u in graph:
        _a = math.inf
        _a = None
    _a = 0
    _a = graph[:]
    while q:
        _a = min(lowerCamelCase__ )
        q.remove(lowerCamelCase__ )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                _a = u
                _a = u.edges[v.id]
    for i in range(1, len(lowerCamelCase__ ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a


def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
    for u in graph:
        _a = math.inf
        _a = None
    _a = 0
    _a = list(lowerCamelCase__ )
    hq.heapify(lowerCamelCase__ )
    while h:
        _a = hq.heappop(lowerCamelCase__ )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                _a = u
                _a = u.edges[v.id]
                hq.heapify(lowerCamelCase__ )
    for i in range(1, len(lowerCamelCase__ ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def _lowercase ( ):
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
691
1
'''simple docstring'''
import numpy

# List of input, output pairs
__snake_case : List[str] = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
__snake_case : Tuple = (((515, 22, 13), 555), ((61, 35, 49), 150))
__snake_case : List[Any] = [2, 4, 1, 5]
__snake_case : Union[str, Any] = len(train_data)
__snake_case : Tuple = 0.009


def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str]="train" ):
    return calculate_hypothesis_value(lowerCamelCase__, lowerCamelCase__ ) - output(
        lowerCamelCase__, lowerCamelCase__ )


def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
    _a = 0
    for i in range(len(lowerCamelCase__ ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : int ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : int ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : int=m ):
    _a = 0
    for i in range(lowerCamelCase__ ):
        if index == -1:
            summation_value += _error(lowerCamelCase__ )
        else:
            summation_value += _error(lowerCamelCase__ ) * train_data[i][0][index]
    return summation_value


def _lowercase ( lowerCamelCase__ : Tuple ):
    _a = summation_of_cost_derivative(lowerCamelCase__, lowerCamelCase__ ) / m
    return cost_derivative_value


def _lowercase ( ):
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    _a = 0.00_00_02
    _a = 0
    _a = 0
    while True:
        j += 1
        _a = [0, 0, 0, 0]
        for i in range(0, len(lowerCamelCase__ ) ):
            _a = get_cost_derivative(i - 1 )
            _a = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            lowerCamelCase__, lowerCamelCase__, atol=lowerCamelCase__, rtol=lowerCamelCase__, ):
            break
        _a = temp_parameter_vector
    print(("Number of iterations:", j) )


def _lowercase ( ):
    for i in range(len(lowerCamelCase__ ) ):
        print(("Actual output value:", output(lowerCamelCase__, "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(lowerCamelCase__, "test" )) )


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
691
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"

from time import time


class A :
    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str:  # noqa: B008
        _a = multiplier
        _a = increment
        _a = modulo
        _a = seed

    def __lowerCAmelCase ( self ) -> str:
        _a = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    __snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
    while True:
        print(lcg.next_number())
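# Editor's sketch (not part of the dataset row above): a hedged example of the linear
# congruential generator. It assumes the class is `LinearCongruentialGenerator(multiplier,
# increment, modulo, seed)` with a `next_number()` step, as the `__main__` block suggests.
lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, 42)
for _ in range(3):
    # each call advances seed -> (multiplier * seed + increment) % modulo and returns it
    print(lcg.next_number())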
691
1
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class A ( a ): __UpperCAmelCase : int = 0 __UpperCAmelCase : bool = False __UpperCAmelCase : float = 3.0 class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[Any]: # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=snake_case_ ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def __lowerCAmelCase ( self ) -> Optional[int]: # If no defaults are changed, `to_kwargs` returns an empty dict. _a = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 ) AcceleratorState._reset_state() _a = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _a = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_0_0_0 ) self.assertEqual(scaler._enabled , snake_case_ ) @require_multi_gpu def __lowerCAmelCase ( self ) -> Any: _a = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(snake_case_ , env=os.environ.copy() ) if __name__ == "__main__": __snake_case : int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) __snake_case : Any = Accelerator(kwargs_handlers=[ddp_scaler]) __snake_case : Optional[Any] = torch.nn.Linear(100, 200) __snake_case : Optional[int] = accelerator.prepare(model) # Check the values changed in kwargs __snake_case : str = "" __snake_case : str = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
691
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __snake_case : List[str] = logging.get_logger("transformers.models.encodec") __snake_case : Tuple = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } __snake_case : int = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", "encoder.model.15.conv.conv": "encoder.layers.15.conv", } __snake_case : Optional[int] = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } __snake_case : Tuple = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", 
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } __snake_case : int = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } __snake_case : Union[str, Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __snake_case : List[str] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __snake_case : Tuple = [] __snake_case : Optional[int] = [] def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ): for attribute in key.split("." ): _a = getattr(lowerCamelCase__, lowerCamelCase__ ) if weight_type is not None: _a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value elif weight_type == "weight_ih_l0": _a = value elif weight_type == "weight_hh_l0": _a = value elif weight_type == "bias_ih_l0": _a = value elif weight_type == "bias_hh_l0": _a = value elif weight_type == "weight_ih_l1": _a = value elif weight_type == "weight_hh_l1": _a = value elif weight_type == "bias_ih_l1": _a = value elif weight_type == "bias_hh_l1": _a = value else: _a = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ): _a = [] if model_name == "encodec_24khz" or "encodec_32khz": _a = MAPPING_24K elif model_name == "encodec_48khz": _a = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__, lowerCamelCase__ ): logger.info(F'''{name} was ignored''' ) continue _a = False for key, mapped_key in MAPPING.items(): if "*" in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: _a = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue _a = True if "*" in mapped_key: _a = name.split(lowerCamelCase__ )[0].split("." 
)[-2] _a = mapped_key.replace("*", lowerCamelCase__ ) if "weight_g" in name: _a = "weight_g" elif "weight_v" in name: _a = "weight_v" elif "weight_ih_l0" in name: _a = "weight_ih_l0" elif "weight_hh_l0" in name: _a = "weight_hh_l0" elif "bias_ih_l0" in name: _a = "bias_ih_l0" elif "bias_hh_l0" in name: _a = "bias_hh_l0" elif "weight_ih_l1" in name: _a = "weight_ih_l1" elif "weight_hh_l1" in name: _a = "weight_hh_l1" elif "bias_ih_l1" in name: _a = "bias_ih_l1" elif "bias_hh_l1" in name: _a = "bias_hh_l1" elif "bias" in name: _a = "bias" elif "weight" in name: _a = "weight" elif "running_mean" in name: _a = "running_mean" elif "running_var" in name: _a = "running_var" elif "num_batches_tracked" in name: _a = "num_batches_tracked" else: _a = None set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ): if config_path is not None: _a = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: _a = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": _a = [8, 5, 4, 4] _a = [2.2] _a = 64 _a = 32_000 _a = 2_048 _a = False _a = False _a = False elif model_name == "encodec_48khz": _a = [8, 5, 4, 2] _a = [3.0, 6.0, 12.0, 24.0] _a = 48_000 _a = 2 _a = False _a = "time_group_norm" _a = True _a = 1.0 _a = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) _a = EncodecModel(lowerCamelCase__ ) _a = EncodecFeatureExtractor( feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, ) feature_extractor.save_pretrained(lowerCamelCase__ ) _a = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights _a = original_checkpoint["best_state"] recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __snake_case : List[Any] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
691
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A ( a , a , a , unittest.TestCase ): __UpperCAmelCase : List[Any] = StableDiffusionInpaintPipeline __UpperCAmelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __UpperCAmelCase : Any = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCAmelCase : Tuple = frozenset([] ) def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) _a = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=snake_case_ , ) _a = PNDMScheduler(skip_prk_steps=snake_case_ ) torch.manual_seed(0 ) _a = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , ) _a = CLIPTextModel(snake_case_ ) _a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _a = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> str: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched _a = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ ) _a = image.cpu().permute(0 , 2 , 3 , 1 )[0] _a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) ) _a = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((6_4, 6_4) ) if str(snake_case_ ).startswith("mps" ): _a = torch.manual_seed(snake_case_ ) else: _a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _a = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __lowerCAmelCase ( self ) -> Dict: _a = "cpu" # ensure determinism for the device-dependent torch.Generator _a = self.get_dummy_components() _a = StableDiffusionInpaintPipeline(**snake_case_ ) _a = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _a = 
self.get_dummy_inputs(snake_case_ ) _a = sd_pipe(**snake_case_ ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) _a = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> int: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) _a = "stabilityai/stable-diffusion-2-inpainting" _a = StableDiffusionInpaintPipeline.from_pretrained(snake_case_ , safety_checker=snake_case_ ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _a = "Face of a yellow cat, high resolution, sitting on a park bench" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , generator=snake_case_ , output_type="np" , ) _a = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def __lowerCAmelCase ( self ) -> Optional[Any]: _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) _a = "stabilityai/stable-diffusion-2-inpainting" _a = StableDiffusionInpaintPipeline.from_pretrained( snake_case_ , torch_dtype=torch.floataa , safety_checker=snake_case_ , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() _a = "Face of a yellow cat, high resolution, sitting on a park bench" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , generator=snake_case_ , output_type="np" , ) _a = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __lowerCAmelCase ( self ) -> Any: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) _a = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) _a = "stabilityai/stable-diffusion-2-inpainting" _a = PNDMScheduler.from_pretrained(snake_case_ , subfolder="scheduler" ) _a = StableDiffusionInpaintPipeline.from_pretrained( snake_case_ , safety_checker=snake_case_ , scheduler=snake_case_ , torch_dtype=torch.floataa , ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) 
pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _a = "Face of a yellow cat, high resolution, sitting on a park bench" _a = torch.manual_seed(0 ) _a = pipe( prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , ) _a = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
691
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__snake_case : int = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Union[str, Any] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Dict = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    __snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
691
1
'''simple docstring''' import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging __snake_case : Optional[int] = logging.get_logger(__name__) __snake_case : Tuple = R"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n" class A ( a ): @add_start_docstrings(snake_case_ ) def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class A ( a ): def __init__( self , snake_case_ , snake_case_ = None ) -> str: _a = max_length _a = max_position_embeddings @add_start_docstrings(snake_case_ ) def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool: _a = input_ids.shape[-1] _a = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe ''' "exceptions, performance degradation, or nothing at all." ) return is_done class A ( a ): def __init__( self , snake_case_ , snake_case_ ) -> Dict: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' "with `max_length = start_length + max_new_tokens` instead." 
, snake_case_ , ) _a = start_length _a = max_new_tokens _a = start_length + max_new_tokens @add_start_docstrings(snake_case_ ) def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool: return input_ids.shape[-1] >= self.max_length class A ( a ): def __init__( self , snake_case_ , snake_case_ = None ) -> Optional[int]: _a = max_time _a = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(snake_case_ ) def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool: return time.time() - self.initial_timestamp > self.max_time class A ( a ): @add_start_docstrings(snake_case_ ) def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool: return any(criteria(snake_case_ , snake_case_ ) for criteria in self ) @property def __lowerCAmelCase ( self ) -> Optional[int]: for stopping_criterium in self: if isinstance(snake_case_ , snake_case_ ): return stopping_criterium.max_length elif isinstance(snake_case_ , snake_case_ ): return stopping_criterium.max_length return None def _lowercase ( lowerCamelCase__ : StoppingCriteriaList, lowerCamelCase__ : int ): _a = stopping_criteria.max_length _a = deepcopy(lowerCamelCase__ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", lowerCamelCase__ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCamelCase__ ) ) return new_stopping_criteria
691
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class A ( metaclass=a ):
    __UpperCAmelCase : int = ["""torch""", """scipy"""]

    def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
        requires_backends(self , ["torch", "scipy"] )

    @classmethod
    def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
        requires_backends(cls , ["torch", "scipy"] )

    @classmethod
    def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
        requires_backends(cls , ["torch", "scipy"] )
691
1
'''simple docstring'''
import math


def _lowercase ( lowerCamelCase__ : float, lowerCamelCase__ : float ):
    if (
        not isinstance(lowerCamelCase__, (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * power_factor


def _lowercase ( lowerCamelCase__ : float, lowerCamelCase__ : float ):
    if (
        not isinstance(lowerCamelCase__, (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * math.sqrt(1 - power_factor**2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
691
'''simple docstring''' __snake_case : Dict = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
691
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __snake_case : Union[str, Any] = logging.get_logger(__name__) class A ( a ): __UpperCAmelCase : Optional[Any] = ["""pixel_values"""] def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = 0.9 , snake_case_ = PILImageResampling.BICUBIC , snake_case_ = True , snake_case_ = None , snake_case_ = 1 / 2_5_5 , snake_case_ = True , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> None: super().__init__(**snake_case_ ) _a = size if size is not None else {"shortest_edge": 2_2_4} _a = get_size_dict(snake_case_ , default_to_square=snake_case_ ) _a = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} _a = get_size_dict(snake_case_ , param_name="crop_size" ) _a = do_resize _a = size _a = crop_pct _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = PILImageResampling.BICUBIC , snake_case_ = None , **snake_case_ , ) -> np.ndarray: _a = get_size_dict(snake_case_ , default_to_square=snake_case_ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) if crop_pct is not None: if "shortest_edge" in size: _a = int(size["shortest_edge"] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: _a = int(size["height"] / crop_pct ) else: _a = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct )) else: raise ValueError("Invalid size for resize: {}".format(snake_case_ ) ) _a = get_resize_output_image_size(snake_case_ , size=snake_case_ , default_to_square=snake_case_ ) else: if "shortest_edge" in size: _a = get_resize_output_image_size(snake_case_ , size=size["shortest_edge"] , default_to_square=snake_case_ ) elif "height" in size and "width" in size: _a = (size["height"], size["width"]) else: raise ValueError("Invalid size for resize: {}".format(snake_case_ ) ) return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray: _a = get_size_dict(snake_case_ ) if "height" not in size or "width" not in size: raise ValueError(F'''size must contain \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(snake_case_ , size=(size["height"], size["width"]) , data_format=snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> Union[str, Any]: return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray: return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> PIL.Image.Image: _a = do_resize if do_resize is not None else self.do_resize _a = crop_pct if crop_pct is not None else self.crop_pct _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = size if size is not None else self.size _a = get_size_dict(snake_case_ , default_to_square=snake_case_ ) _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(snake_case_ , param_name="crop_size" ) _a = make_list_of_images(snake_case_ ) if not valid_images(snake_case_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_pct is None: raise ValueError("Crop_pct must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _a = [to_numpy_array(snake_case_ ) for image in images] if do_resize: _a = [self.resize(image=snake_case_ , size=snake_case_ , crop_pct=snake_case_ , resample=snake_case_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images] if do_rescale: _a = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images] if do_normalize: _a = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images] _a = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images] _a = {"pixel_values": images} return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
691
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = ProphetNetTokenizer __UpperCAmelCase : Optional[Any] = False def __lowerCAmelCase ( self ) -> Tuple: super().setUp() _a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def __lowerCAmelCase ( self , snake_case_ ) -> Any: _a = "UNwant\u00E9d,running" _a = "unwanted, running" return input_text, output_text def __lowerCAmelCase ( self ) -> Any: _a = self.tokenizer_class(self.vocab_file ) _a = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def __lowerCAmelCase ( self ) -> List[str]: _a = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> List[Any]: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> int: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __lowerCAmelCase ( self ) -> List[str]: _a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] _a = {} for i, token in enumerate(snake_case_ ): _a = i _a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = ["A long paragraph for summarization.", "Another paragraph for summarization."] _a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2] _a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ ) _a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ ) assert encoded_sentence == text + [1_0_2] assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
691
1
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = ProphetNetTokenizer __UpperCAmelCase : Optional[Any] = False def __lowerCAmelCase ( self ) -> Tuple: super().setUp() _a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def __lowerCAmelCase ( self , snake_case_ ) -> Any: _a = "UNwant\u00E9d,running" _a = "unwanted, running" return input_text, output_text def __lowerCAmelCase ( self ) -> Any: _a = self.tokenizer_class(self.vocab_file ) _a = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def __lowerCAmelCase ( self ) -> List[str]: _a = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> Any: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __lowerCAmelCase ( self ) -> List[Any]: _a = BasicTokenizer(do_lower_case=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> int: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Tuple: _a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __lowerCAmelCase ( self ) -> List[str]: _a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] _a = {} for i, token in enumerate(snake_case_ ): _a = i _a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = ["A long paragraph for summarization.", "Another paragraph for summarization."] _a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2] _a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" ) self.assertIsInstance(snake_case_ , snake_case_ ) _a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __lowerCAmelCase ( self ) -> List[Any]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" ) _a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ ) _a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ ) _a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ ) assert encoded_sentence == text + [1_0_2] assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
691
'''simple docstring''' import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _lowercase ( ): _a = argparse.ArgumentParser() parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" ) parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 ) parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 ) parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 ) parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ ) parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 ) parser.add_argument("--seed", type=lowerCamelCase__, default=0 ) parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" ) parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 ) parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 ) parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" ) return parser.parse_args() __snake_case : str = load("accuracy") def _lowercase ( lowerCamelCase__ : List[str] ): _a , _a = eval_pred _a = np.argmax(lowerCamelCase__, axis=1 ) return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ ) class A ( a ): def __init__( self , snake_case_ ) -> None: super().__init__() _a = trainer def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]: if control.should_evaluate: _a = deepcopy(snake_case_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" ) return control_copy def _lowercase ( ): _a = get_args() set_seed(args.seed ) _a = load_dataset("codeparrot/codecomplex", split="train" ) _a = dataset.train_test_split(test_size=0.2 ) _a = train_test["test"].train_test_split(test_size=0.5 ) _a = DatasetDict( { "train": train_test["train"], "test": test_validation["train"], "valid": test_validation["test"], } ) print("Loading tokenizer and model" ) _a = AutoTokenizer.from_pretrained(args.model_ckpt ) _a = tokenizer.eos_token _a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 ) _a = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): _a = False _a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) ) def tokenize(lowerCamelCase__ : Tuple ): _a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 ) _a = labels.straint(example["complexity"] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } _a = train_test_validation.map( lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, ) _a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ ) _a = TrainingArguments( output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", 
run_name="complexity-java", report_to="wandb", ) _a = Trainer( model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, ) print("Training..." ) trainer.add_callback(CustomCallback(lowerCamelCase__ ) ) trainer.train() if __name__ == "__main__": main()
691
1
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


__snake_case : List[Any] = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
    _a = True
    while ask_again:
        _a = input(lowerCamelCase__ )
        try:
            if default is not None and len(lowerCamelCase__ ) == 0:
                return default
            return convert_value(lowerCamelCase__ ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(lowerCamelCase__ )


def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
    _a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
    _a = menu.run(default_choice=lowerCamelCase__ )
    return convert_value(lowerCamelCase__ ) if convert_value is not None else result


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )


def _lowercase ( lowerCamelCase__ : Dict ):
    _a = int(lowerCamelCase__ )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value


def _lowercase ( lowerCamelCase__ : List[Any] ):
    _a = int(lowerCamelCase__ )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    return {"yes": True, "no": False}[value.lower()]


class A ( argparse.RawDescriptionHelpFormatter ):
    def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
        _a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        _a = usage.replace("<command> [<args>] " , "" )
        return usage
691
'''simple docstring''' # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ): _a = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] _a = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } _a = F'''{src_lang}-{tgt_lang}''' _a = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ ) _a = os.path.join(lowerCamelCase__, "README.md" ) print(F'''Generating {path}''' ) with open(lowerCamelCase__, "w", encoding="utf-8" ) as f: f.write(lowerCamelCase__ ) # make sure we are under the root of the project __snake_case : int = Path(__file__).resolve().parent.parent.parent __snake_case : int = repo_dir / "model_cards" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: __snake_case : Any = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
691
1
'''simple docstring'''
import sys


def _lowercase ( lowerCamelCase__ : Dict ):
    _a = len(lowerCamelCase__ )
    _a = [[0 for x in range(lowerCamelCase__ )] for x in range(lowerCamelCase__ )]
    _a = [[0 for x in range(lowerCamelCase__ )] for x in range(lowerCamelCase__ )]
    for chain_length in range(2, lowerCamelCase__ ):
        for a in range(1, n - chain_length + 1 ):
            _a = a + chain_length - 1
            _a = sys.maxsize
            for c in range(lowerCamelCase__, lowerCamelCase__ ):
                _a = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    _a = cost
                    _a = c
    return matrix, sol


def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
    if i == j:
        print("A" + str(lowerCamelCase__ ), end=" " )
    else:
        print("(", end=" " )
        print_optiomal_solution(lowerCamelCase__, lowerCamelCase__, optimal_solution[i][j] )
        print_optiomal_solution(lowerCamelCase__, optimal_solution[i][j] + 1, lowerCamelCase__ )
        print(")", end=" " )


def _lowercase ( ):
    _a = [30, 35, 15, 5, 10, 20, 25]
    _a = len(lowerCamelCase__ )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    _a , _a = matrix_chain_order(lowerCamelCase__ )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(lowerCamelCase__, 1, n - 1 )


if __name__ == "__main__":
    main()
691
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __snake_case : List[str] = logging.get_logger(__name__) __snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all MVP models at https://huggingface.co/models?filter=mvp __snake_case : str = { "vocab_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json", }, "added_tokens.json": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json", }, "merges_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt", }, "tokenizer_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json", }, } __snake_case : Dict = { "RUCAIBox/mvp": 1024, } class A ( a ): __UpperCAmelCase : int = VOCAB_FILES_NAMES __UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""] __UpperCAmelCase : List[Any] = MvpTokenizer def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]: super().__init__( snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , ) _a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space: _a = getattr(snake_case_ , pre_tok_state.pop("type" ) ) _a = add_prefix_space _a = pre_tok_class(**snake_case_ ) _a = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _a = "post_processor" _a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ ) if tokenizer_component_instance: _a = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _a = tuple(state["sep"] ) if "cls" in state: _a = tuple(state["cls"] ) _a = False if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space: _a = add_prefix_space _a = True if state.get("trim_offsets" , snake_case_ ) != trim_offsets: _a = trim_offsets _a = True if changes_to_apply: _a = getattr(snake_case_ , state.pop("type" ) ) _a = component_class(**snake_case_ ) setattr(self.backend_tokenizer , snake_case_ , snake_case_ ) @property def __lowerCAmelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]: _a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value _a = value def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding: _a = kwargs.get("is_split_into_words" , snake_case_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding: _a = kwargs.get("is_split_into_words" , snake_case_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*snake_case_ , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]: _a = self._tokenizer.model.save(snake_case_ , name=snake_case_ ) return tuple(snake_case_ ) def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]: _a = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
691
1
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __snake_case : List[str] = logging.get_logger("transformers.models.encodec") __snake_case : Tuple = { "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited", "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size", "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed", "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg", } __snake_case : int = { "encoder.model.0.conv.conv": "encoder.layers.0.conv", "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv", "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv", "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv", "encoder.model.3.conv.conv": "encoder.layers.3.conv", "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv", "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv", "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv", "encoder.model.6.conv.conv": "encoder.layers.6.conv", "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv", "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv", "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv", "encoder.model.9.conv.conv": "encoder.layers.9.conv", "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv", "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv", "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv", "encoder.model.12.conv.conv": "encoder.layers.12.conv", "encoder.model.13.lstm": "encoder.layers.13.lstm", "encoder.model.15.conv.conv": "encoder.layers.15.conv", } __snake_case : Optional[int] = { "encoder.model.0.conv.norm": "encoder.layers.0.norm", "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm", "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm", "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm", "encoder.model.3.conv.norm": "encoder.layers.3.norm", "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm", "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm", "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm", "encoder.model.6.conv.norm": "encoder.layers.6.norm", "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm", "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm", "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm", "encoder.model.9.conv.norm": "encoder.layers.9.norm", "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm", "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm", "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm", "encoder.model.12.conv.norm": "encoder.layers.12.norm", "encoder.model.15.conv.norm": "encoder.layers.15.norm", } __snake_case : Tuple = { "decoder.model.0.conv.conv": "decoder.layers.0.conv", "decoder.model.1.lstm": "decoder.layers.1.lstm", "decoder.model.3.convtr.convtr": "decoder.layers.3.conv", 
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv", "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv", "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv", "decoder.model.6.convtr.convtr": "decoder.layers.6.conv", "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv", "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv", "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv", "decoder.model.9.convtr.convtr": "decoder.layers.9.conv", "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv", "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv", "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv", "decoder.model.12.convtr.convtr": "decoder.layers.12.conv", "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv", "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv", "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv", "decoder.model.15.conv.conv": "decoder.layers.15.conv", } __snake_case : int = { "decoder.model.0.conv.norm": "decoder.layers.0.norm", "decoder.model.3.convtr.norm": "decoder.layers.3.norm", "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm", "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm", "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm", "decoder.model.6.convtr.norm": "decoder.layers.6.norm", "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm", "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm", "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm", "decoder.model.9.convtr.norm": "decoder.layers.9.norm", "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm", "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm", "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm", "decoder.model.12.convtr.norm": "decoder.layers.12.norm", "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm", "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm", "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm", "decoder.model.15.conv.norm": "decoder.layers.15.norm", } __snake_case : Union[str, Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __snake_case : List[str] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __snake_case : Tuple = [] __snake_case : Optional[int] = [] def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ): for attribute in key.split("." ): _a = getattr(lowerCamelCase__, lowerCamelCase__ ) if weight_type is not None: _a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value elif weight_type == "weight_ih_l0": _a = value elif weight_type == "weight_hh_l0": _a = value elif weight_type == "bias_ih_l0": _a = value elif weight_type == "bias_hh_l0": _a = value elif weight_type == "weight_ih_l1": _a = value elif weight_type == "weight_hh_l1": _a = value elif weight_type == "bias_ih_l1": _a = value elif weight_type == "bias_hh_l1": _a = value else: _a = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ): _a = [] if model_name == "encodec_24khz" or "encodec_32khz": _a = MAPPING_24K elif model_name == "encodec_48khz": _a = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__, lowerCamelCase__ ): logger.info(F'''{name} was ignored''' ) continue _a = False for key, mapped_key in MAPPING.items(): if "*" in key: _a , _a = key.split(".*." ) if prefix in name and suffix in name: _a = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue _a = True if "*" in mapped_key: _a = name.split(lowerCamelCase__ )[0].split("." 
)[-2] _a = mapped_key.replace("*", lowerCamelCase__ ) if "weight_g" in name: _a = "weight_g" elif "weight_v" in name: _a = "weight_v" elif "weight_ih_l0" in name: _a = "weight_ih_l0" elif "weight_hh_l0" in name: _a = "weight_hh_l0" elif "bias_ih_l0" in name: _a = "bias_ih_l0" elif "bias_hh_l0" in name: _a = "bias_hh_l0" elif "weight_ih_l1" in name: _a = "weight_ih_l1" elif "weight_hh_l1" in name: _a = "weight_hh_l1" elif "bias_ih_l1" in name: _a = "bias_ih_l1" elif "bias_hh_l1" in name: _a = "bias_hh_l1" elif "bias" in name: _a = "bias" elif "weight" in name: _a = "weight" elif "running_mean" in name: _a = "running_mean" elif "running_var" in name: _a = "running_var" elif "num_batches_tracked" in name: _a = "num_batches_tracked" else: _a = None set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ): if config_path is not None: _a = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: _a = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": _a = [8, 5, 4, 4] _a = [2.2] _a = 64 _a = 32_000 _a = 2_048 _a = False _a = False _a = False elif model_name == "encodec_48khz": _a = [8, 5, 4, 2] _a = [3.0, 6.0, 12.0, 24.0] _a = 48_000 _a = 2 _a = False _a = "time_group_norm" _a = True _a = 1.0 _a = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) _a = EncodecModel(lowerCamelCase__ ) _a = EncodecFeatureExtractor( feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, ) feature_extractor.save_pretrained(lowerCamelCase__ ) _a = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights _a = original_checkpoint["best_state"] recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": __snake_case : Tuple = argparse.ArgumentParser() parser.add_argument( "--model", default="encodec_24khz", type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __snake_case : List[Any] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
691
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __snake_case : Optional[Any] = 12_8022 __snake_case : List[str] = 12_8028 @require_sentencepiece class A ( a , unittest.TestCase ): __UpperCAmelCase : List[Any] = MaMaaaTokenizer __UpperCAmelCase : int = False __UpperCAmelCase : str = False __UpperCAmelCase : Tuple = True def __lowerCAmelCase ( self ) -> Any: super().setUp() _a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] _a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) _a = Path(self.tmpdirname ) save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) _a = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **snake_case_ ) -> str: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def __lowerCAmelCase ( self , snake_case_ ) -> Tuple: return ( "This is a test", "This is a test", ) def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "</s>" _a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[Any]: _a = self.get_tokenizer() _a = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def __lowerCAmelCase ( self ) -> Any: pass def __lowerCAmelCase ( self ) -> Dict: _a = self.get_tokenizer() _a = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , ) _a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) _a = tokenizer.convert_tokens_to_string(snake_case_ ) self.assertEqual(snake_case_ , "This is a test" ) @slow def __lowerCAmelCase ( self ) -> List[Any]: # fmt: off _a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): __UpperCAmelCase : Any = """facebook/m2m100_418M""" __UpperCAmelCase : Dict = [ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] __UpperCAmelCase : Optional[Any] = [ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off __UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def __lowerCAmelCase ( cls ) -> int: _a = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) _a = 1 return cls def __lowerCAmelCase ( self ) -> Any: self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = self.tokenizer.get_vocab() self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ ) def __lowerCAmelCase ( self ) -> List[str]: _a = "en" _a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: self.assertIn(snake_case_ , self.tokenizer.all_special_ids ) # fmt: off _a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on _a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) _a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertNotIn(self.tokenizer.eos_token , snake_case_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = tempfile.mkdtemp() _a = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(snake_case_ ) _a = MaMaaaTokenizer.from_pretrained(snake_case_ ) self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ ) @require_torch def __lowerCAmelCase ( self ) -> Optional[Any]: _a = "en" _a = "fr" _a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" ) _a = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: _a = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: _a = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) _a = "zh" 
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __lowerCAmelCase ( self ) -> List[Any]: _a = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) _a = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __lowerCAmelCase ( self ) -> int: _a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(snake_case_ ) , { # en_XX, A, test, EOS "input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 1_2_8_0_0_6, } , )
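The integration tests above pin down the language-code handling of the M2M100 tokenizer (the facebook/m2m100_418M checkpoint, src_lang/tgt_lang, get_lang_id). A minimal end-to-end translation sketch built on those same pieces could look as follows; the checkpoint name and language codes come from the tests, while the example sentence is arbitrary, and the public class names (M2M100Tokenizer, M2M100ForConditionalGeneration) replace the obfuscated MaMaaa aliases used in this copy.

# Sketch only: translate one English sentence to French with the checkpoint used in the tests.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))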
691
1
'''simple docstring'''
from __future__ import annotations


def _lowercase ( lowerCamelCase__ : list[int | str] ):
    create_state_space_tree(lowerCamelCase__, [], 0, [0 for i in range(len(lowerCamelCase__ ) )] )


def _lowercase ( lowerCamelCase__ : list[int | str], lowerCamelCase__ : list[int | str], lowerCamelCase__ : int, lowerCamelCase__ : list[int], ):
    if index == len(lowerCamelCase__ ):
        print(lowerCamelCase__ )
        return
    for i in range(len(lowerCamelCase__ ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            _a = True
            create_state_space_tree(lowerCamelCase__, lowerCamelCase__, index + 1, lowerCamelCase__ )
            current_sequence.pop()
            _a = False


__snake_case : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

__snake_case : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
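The file above prints each permutation as it is found. A self-contained variant of the same state-space-tree backtracking, collecting results instead of printing and cross-checked against itertools.permutations, could look like this (the function and variable names here are illustrative, not taken from the file).

from itertools import permutations

def all_permutations(sequence):
    results = []

    def backtrack(current, used):
        # a full-length prefix is a complete permutation
        if len(current) == len(sequence):
            results.append(list(current))
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                current.pop()          # undo the choice before trying the next branch
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results

perms = all_permutations([3, 1, 2])
assert sorted(perms) == sorted(map(list, permutations([3, 1, 2])))  # 3! = 6 orderings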
691
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case : Tuple = logging.get_logger(__name__) __snake_case : int = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class A ( a ): __UpperCAmelCase : Union[str, Any] = """wav2vec2""" def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]: super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # adapter _a = add_adapter _a = adapter_kernel_size _a = adapter_stride _a = num_adapter_layers _a = output_hidden_size or hidden_size _a = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _a = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _a = list(snake_case_ ) _a = list(snake_case_ ) _a = list(snake_case_ ) _a = xvector_output_dim @property def __lowerCAmelCase ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
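The property at the end of the configuration above reduces conv_stride with multiplication, which is the total downsampling factor of the convolutional feature encoder. A quick illustrative check with the default stride tuple from the signature (values copied from the code, interpretation of the 16 kHz frame rate is an added note):

import functools
import operator

# default conv_stride from the signature above
conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one encoder frame per 320 input samples (20 ms at 16 kHz)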
691
1
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: _a = 0 def __lowerCAmelCase ( self ) -> int: _a = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> str: with tempfile.TemporaryDirectory() as tmpdirname: _a = Path(snake_case_ ) / "preprocessor_config.json" _a = Path(snake_case_ ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , ) json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) ) _a = AutoImageProcessor.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> List[str]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _a = Path(snake_case_ ) / "preprocessor_config.json" _a = Path(snake_case_ ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , ) json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) ) _a = AutoImageProcessor.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: _a = CLIPConfig() # Create a dummy config file with image_proceesor_type _a = Path(snake_case_ ) / "preprocessor_config.json" _a = Path(snake_case_ ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , ) json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _a = AutoImageProcessor.from_pretrained(snake_case_ ).to_dict() config_dict.pop("image_processor_type" ) _a = CLIPImageProcessor(**snake_case_ ) # save in new folder model_config.save_pretrained(snake_case_ ) config.save_pretrained(snake_case_ ) _a = AutoImageProcessor.from_pretrained(snake_case_ ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmpdirname: _a = Path(snake_case_ ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , ) _a = AutoImageProcessor.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: with self.assertRaisesRegex( snake_case_ , "clip-base is not a local folder and is not a valid model identifier" ): _a = AutoImageProcessor.from_pretrained("clip-base" ) def __lowerCAmelCase ( self ) -> Dict: with self.assertRaisesRegex( snake_case_ , 
R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _a = AutoImageProcessor.from_pretrained(snake_case_ , revision="aaaaaa" ) def __lowerCAmelCase ( self ) -> List[Any]: with self.assertRaisesRegex( snake_case_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _a = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def __lowerCAmelCase ( self ) -> str: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case_ ): _a = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case_ ): _a = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ ) _a = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case_ ) _a = AutoImageProcessor.from_pretrained(snake_case_ , trust_remote_code=snake_case_ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def __lowerCAmelCase ( self ) -> Tuple: try: AutoConfig.register("custom" , snake_case_ ) AutoImageProcessor.register(snake_case_ , snake_case_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case_ ): AutoImageProcessor.register(snake_case_ , snake_case_ ) with tempfile.TemporaryDirectory() as tmpdirname: _a = Path(snake_case_ ) / "preprocessor_config.json" _a = Path(snake_case_ ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , ) json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) ) _a = CustomImageProcessor.from_pretrained(snake_case_ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case_ ) _a = AutoImageProcessor.from_pretrained(snake_case_ ) self.assertIsInstance(snake_case_ , snake_case_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[Any]: class A ( a ): __UpperCAmelCase : str = True try: AutoConfig.register("custom" , snake_case_ ) AutoImageProcessor.register(snake_case_ , snake_case_ ) # If remote code is not set, the default is to use local _a = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. 
_a = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _a = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(snake_case_ , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
691
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number | (1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number & ~(1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return number ^ (1 << position)


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return ((number >> position) & 1) == 1


def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
    return int((number & (1 << position)) != 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
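Each helper above is a one-line expression over a number and a bit position (set, clear, flip, shift-based test, mask-based test). A short standalone demonstration of the same expressions on a concrete value, with expected results in the comments:

number = 0b1010  # 10

assert (number | (1 << 2)) == 0b1110       # set bit 2        -> 14
assert (number & ~(1 << 1)) == 0b1000      # clear bit 1      -> 8
assert (number ^ (1 << 0)) == 0b1011       # flip bit 0       -> 11
assert ((number >> 3) & 1) == 1            # bit 3 is set
assert int((number & (1 << 2)) != 0) == 0  # bit 2 is not set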
691
1
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class A :
    __UpperCAmelCase : torch.Tensor  # [batch_size x 3]
    __UpperCAmelCase : torch.Tensor  # [batch_size x 3]
    __UpperCAmelCase : torch.Tensor  # [batch_size x 3]
    __UpperCAmelCase : torch.Tensor  # [batch_size x 3]
    __UpperCAmelCase : int
    __UpperCAmelCase : int
    __UpperCAmelCase : float
    __UpperCAmelCase : float
    __UpperCAmelCase : Tuple[int]

    def __lowerCAmelCase ( self ) -> Dict:
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2

    def __lowerCAmelCase ( self ) -> int:
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )

    def __lowerCAmelCase ( self ) -> Any:
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )

    def __lowerCAmelCase ( self ) -> torch.Tensor:
        _a = torch.arange(self.height * self.width )
        _a = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(snake_case_ , self.width , rounding_mode="trunc" ),
            ] ,
            axis=1 ,
        )
        return coords

    @property
    def __lowerCAmelCase ( self ) -> Any:
        _a , *_a = self.shape
        _a = int(np.prod(snake_case_ ) )
        _a = self.get_image_coords()
        _a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        _a = self.get_camera_rays(snake_case_ )
        _a = rays.view(snake_case_ , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays

    def __lowerCAmelCase ( self , snake_case_ ) -> torch.Tensor:
        _a , *_a , _a = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        _a = coords.view(snake_case_ , -1 , 2 )
        _a = self.resolution()
        _a = self.fov()
        _a = (flat.float() / (res - 1)) * 2 - 1
        _a = fracs * torch.tan(fov / 2 )
        _a = fracs.view(snake_case_ , -1 , 2 )
        _a = (
            self.z.view(snake_case_ , 1 , 3 )
            + self.x.view(snake_case_ , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(snake_case_ , 1 , 3 ) * fracs[:, :, 1:]
        )
        _a = directions / directions.norm(dim=-1 , keepdim=snake_case_ )
        _a = torch.stack(
            [
                torch.broadcast_to(self.origin.view(snake_case_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] ,
            dim=2 ,
        )
        return rays.view(snake_case_ , *snake_case_ , 2 , 3 )

    def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin ,
            x=self.x ,
            y=self.y ,
            z=self.z ,
            width=snake_case_ ,
            height=snake_case_ ,
            x_fov=self.x_fov ,
            y_fov=self.y_fov ,
        )


def _lowercase ( lowerCamelCase__ : int ):
    _a = []
    _a = []
    _a = []
    _a = []
    for theta in np.linspace(0, 2 * np.pi, num=20 ):
        _a = np.array([np.sin(lowerCamelCase__ ), np.cos(lowerCamelCase__ ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        _a = -z * 4
        _a = np.array([np.cos(lowerCamelCase__ ), -np.sin(lowerCamelCase__ ), 0.0] )
        _a = np.cross(lowerCamelCase__, lowerCamelCase__ )
        origins.append(lowerCamelCase__ )
        xs.append(lowerCamelCase__ )
        ys.append(lowerCamelCase__ )
        zs.append(lowerCamelCase__ )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        x=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        y=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        z=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        width=lowerCamelCase__,
        height=lowerCamelCase__,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(lowerCamelCase__ )),
    )
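The get_camera_rays logic above maps pixel coordinates to the [-1, 1] square, scales them by tan(fov / 2), and mixes the x/y/z axis vectors into normalized ray directions. The standalone snippet below reproduces that projection for a single pixel so the geometry is easier to follow; the axis vectors, origin, and field of view are made-up illustrative values, not ones taken from the class.

import torch

width, height, fov = 64, 64, 0.7           # illustrative values
origin = torch.tensor([0.0, 0.0, -4.0])
x_axis = torch.tensor([1.0, 0.0, 0.0])
y_axis = torch.tensor([0.0, 1.0, 0.0])
z_axis = torch.tensor([0.0, 0.0, 1.0])      # viewing direction

pixel = torch.tensor([32.0, 16.0])           # (column, row)
frac = (pixel / torch.tensor([width - 1, height - 1], dtype=torch.float32)) * 2 - 1
frac = frac * torch.tan(torch.tensor(fov / 2))

direction = z_axis + x_axis * frac[0] + y_axis * frac[1]
direction = direction / direction.norm()
ray = torch.stack([origin, direction])       # the (origin, direction) pair stored per pixel
print(ray)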
691
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


__snake_case : List[Any] = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
    _a = True
    while ask_again:
        _a = input(lowerCamelCase__ )
        try:
            if default is not None and len(lowerCamelCase__ ) == 0:
                return default
            return convert_value(lowerCamelCase__ ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(lowerCamelCase__ )


def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
    _a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
    _a = menu.run(default_choice=lowerCamelCase__ )
    return convert_value(lowerCamelCase__ ) if convert_value is not None else result


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )


def _lowercase ( lowerCamelCase__ : Dict ):
    _a = int(lowerCamelCase__ )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value


def _lowercase ( lowerCamelCase__ : List[Any] ):
    _a = int(lowerCamelCase__ )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    _a = int(lowerCamelCase__ )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )


def _lowercase ( lowerCamelCase__ : str ):
    return {"yes": True, "no": False}[value.lower()]


class A ( argparse.RawDescriptionHelpFormatter ):
    def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
        _a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        _a = usage.replace("<command> [<args>] " , "" )
        return usage
691
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__snake_case : Dict = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : int = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    __snake_case : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
691
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[list] ):
    _a = current_set.copy()
    for row_index, row in enumerate(lowerCamelCase__ ):
        _a = row[0]
        for column_index, column in enumerate(lowerCamelCase__ ):
            if magnitude == 0:
                _a = column
                continue
            _a = column / magnitude
    # Subtract to cancel term
    _a = current_set[0]
    _a = [first_row]
    _a = current_set[1::]
    for row in current_set:
        _a = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(lowerCamelCase__ )
            continue
        for column_index in range(len(lowerCamelCase__ ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(lowerCamelCase__ )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        _a = final_set[0]
        _a = []
        _a = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        _a = simplify(lowerCamelCase__ )
        for i in range(len(lowerCamelCase__ ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, lowerCamelCase__ )
        _a = resultant
    return final_set


def _lowercase ( lowerCamelCase__ : list[list] ):
    if len(lowerCamelCase__ ) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    _a = len(lowerCamelCase__ ) + 1
    if any(len(lowerCamelCase__ ) != _length for item in equations ):
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    for row in equations:
        if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
            raise ValueError("solve_simultaneous() requires lists of integers" )
    if len(lowerCamelCase__ ) == 1:
        return [equations[0][-1] / equations[0][0]]
    _a = equations.copy()
    if any(0 in row for row in data_set ):
        _a = data_set.copy()
        _a = []
        for row_index, row in enumerate(lowerCamelCase__ ):
            if 0 not in row:
                _a = data_set.pop(lowerCamelCase__ )
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation" )
        data_set.insert(0, lowerCamelCase__ )
    _a = data_set.copy()
    _a = simplify(lowerCamelCase__ )
    _a = simplified[::-1]
    _a = []
    for row in simplified:
        _a = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        _a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(lowerCamelCase__ ) == 0:
            solutions.append(0 )
            continue
        _a = temp_row[1::]
        _a = temp_row[::-1]
        for column_index, column in enumerate(lowerCamelCase__ ):
            current_solution -= column * solutions[column_index]
        solutions.append(lowerCamelCase__ )
    _a = []
    for item in solutions:
        final.append(float(round(lowerCamelCase__, 5 ) ) )
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    __snake_case : Tuple = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
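As a concrete check of the Gaussian-elimination routine above: the 2-variable system 2x + y = 5, x + 3y = 5 has the solution x = 2, y = 1. The sketch below verifies that answer independently with NumPy rather than the module's own solver, using the same augmented-matrix layout (coefficients followed by the constant term).

import numpy as np

# each row is [a, b, constant] for a*x + b*y = constant
augmented = [[2, 1, 5], [1, 3, 5]]

coefficients = np.array([row[:-1] for row in augmented], dtype=float)
constants = np.array([row[-1] for row in augmented], dtype=float)

solution = np.linalg.solve(coefficients, constants)
print(solution)  # [2. 1.]  -> x = 2, y = 1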
691
1
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A : @staticmethod def __lowerCAmelCase ( *snake_case_ , **snake_case_ ) -> Union[str, Any]: pass @is_pipeline_test @require_vision class A ( unittest.TestCase ): @require_torch def __lowerCAmelCase ( self ) -> int: _a = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _a = image_classifier(snake_case_ , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(snake_case_ ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) _a = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case_ ) , [ [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], ] , ) @require_tf def __lowerCAmelCase ( self ) -> Any: _a = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _a = image_classifier(snake_case_ , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(snake_case_ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) _a = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case_ ) , [ [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], [ {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, {"score": 0.333, "label": ANY(snake_case_ )}, ], ] , ) @slow @require_torch def __lowerCAmelCase ( self ) -> List[Any]: _a = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image 
of 2 cats with remotes and no planes _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _a = image_classifier(snake_case_ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(snake_case_ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) _a = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case_ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def __lowerCAmelCase ( self ) -> List[Any]: _a = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _a = image_classifier(snake_case_ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(snake_case_ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) _a = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(snake_case_ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
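The slow tests above exercise the real openai/clip-vit-base-patch32 checkpoint through the zero-shot image-classification pipeline. Outside of the test harness, the same call pattern looks roughly like this; the image path and label set are placeholders taken from the fixtures used in the tests.

from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")

# any local image path or PIL.Image works here; this path is a placeholder
predictions = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.3f}")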
691
'''simple docstring''' import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing return x.sum() def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing return i + 1 @dataclass class A : __UpperCAmelCase : int __UpperCAmelCase : str class A ( a ): def __lowerCAmelCase ( self ) -> Tuple: _a = {} _a = [] _a = 1 _a = [1, 2] _a = {"a": 1, "b": 2} _a = {"a": [1, 2], "b": [3, 4]} _a = {"a": {"1": 1}, "b": 2} _a = {"a": 1, "b": 2, "c": 3, "d": 4} _a = {} _a = [] _a = 2 _a = [2, 3] _a = {"a": 2, "b": 3} _a = {"a": [2, 3], "b": [4, 5]} _a = {"a": {"1": 2}, "b": 3} _a = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ ) _a = 2 self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ ) _a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} _a = {"a": 2, "b": 0, "c": 2} _a = { "a": np.eye(2 ).astype(snake_case_ ), "b": np.zeros(3 ).astype(snake_case_ ), "c": np.ones(2 ).astype(snake_case_ ), } self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(snake_case_ ): # can't pickle a local lambda map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ ) def __lowerCAmelCase ( self ) -> Any: _a = {"a": 1, "b": 2} _a = {"a": 3, "b": 4} _a = {"a": 5, "b": 6} _a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(snake_case_ , 
snake_case_ , snake_case_ ) ) , snake_case_ ) def __lowerCAmelCase ( self ) -> str: class A : __UpperCAmelCase : Optional[int] = """bar""" _a = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(snake_case_ , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc", [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ): with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: _a = {F'''{i}''': i for i in range(lowerCamelCase__ )} _a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class A ( a ): @require_tf def __lowerCAmelCase ( self ) -> Any: import tensorflow as tf from tensorflow.keras import layers _a = layers.Dense(2 ) def gen_random_output(): _a = tf.random.uniform((1, 3) ) return model(snake_case_ ).numpy() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_tensorflow=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __lowerCAmelCase ( self ) -> Union[str, Any]: import torch def gen_random_output(): _a = torch.nn.Linear(3 , 2 ) _a = torch.rand(1 , 3 ) return model(snake_case_ ).detach().numpy() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() with temp_seed(4_2 , set_pytorch=snake_case_ ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __lowerCAmelCase ( self ) -> Optional[int]: def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(4_2 ): _a = gen_random_output() with temp_seed(4_2 ): _a = gen_random_output() _a = gen_random_output() np.testing.assert_equal(snake_case_ , snake_case_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data", [{}] ) def _lowercase ( lowerCamelCase__ : Any ): _a = NestedDataStructure(lowerCamelCase__ ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output", [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ], ) def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ): _a = 
NestedDataStructure(lowerCamelCase__ ).flatten() assert output == expected_output def _lowercase ( ): _a = A(x=1, y="foobar" ) _a = {"x": 1, "y": "foobar"} assert asdict(lowerCamelCase__ ) == expected_output _a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]} _a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(lowerCamelCase__ ) == expected_output with pytest.raises(lowerCamelCase__ ): asdict([1, A(x=10, y="foo" )] ) def _lowercase ( lowerCamelCase__ : str ): return text.split() def _lowercase ( lowerCamelCase__ : List[Any] ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _lowercase ( ): with Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(lowerCamelCase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _a = [] for yield_time, content in iflatmap_unordered( lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(lowerCamelCase__ ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(lowerCamelCase__ ) == 4
691
1
'''simple docstring''' __snake_case : Dict = { "Pillow": "Pillow<10.0.0", "accelerate": "accelerate>=0.20.3", "av": "av==9.2.0", "beautifulsoup4": "beautifulsoup4", "black": "black~=23.1", "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", "datasets": "datasets!=2.5.0", "decord": "decord==0.6.0", "deepspeed": "deepspeed>=0.9.3", "diffusers": "diffusers", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", "filelock": "filelock", "flax": "flax>=0.4.1,<=0.7.0", "ftfy": "ftfy", "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.14.1,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13", "jaxlib": "jaxlib>=0.1.65,<=0.4.13", "jieba": "jieba", "kenlm": "kenlm", "keras-nlp": "keras-nlp>=0.3.1", "librosa": "librosa", "nltk": "nltk", "natten": "natten>=0.14.6", "numpy": "numpy>=1.17", "onnxconverter-common": "onnxconverter-common", "onnxruntime-tools": "onnxruntime-tools>=1.4.2", "onnxruntime": "onnxruntime>=1.4.0", "opencv-python": "opencv-python", "optuna": "optuna", "optax": "optax>=0.0.8,<=0.1.4", "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", "protobuf": "protobuf", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic<2", "pytest": "pytest>=7.2.0", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "python": "python>=3.8.0", "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", "rhoknp": "rhoknp>=1.1.0,<1.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "ruff": "ruff>=0.0.241,<=0.0.259", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "safetensors": "safetensors>=0.3.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "sigopt": "sigopt", "starlette": "starlette", "sudachipy": "sudachipy>=0.6.6", "sudachidict_core": "sudachidict_core>=20220729", "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14", "tensorflow": "tensorflow>=2.6,<2.14", "tensorflow-text": "tensorflow-text<2.14", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.9,!=1.12.0", "torchaudio": "torchaudio", "torchvision": "torchvision", "pyctcdecode": "pyctcdecode>=0.4.0", "tqdm": "tqdm>=4.27", "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "urllib3": "urllib3<2.0.0", "uvicorn": "uvicorn", }
691
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings __snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(a ) class A ( a ): __UpperCAmelCase : Dict = """rag""" __UpperCAmelCase : Dict = True def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]: super().__init__( bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _a = kwargs.pop("question_encoder" ) _a = question_encoder_config.pop("model_type" ) _a = kwargs.pop("generator" ) _a = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = AutoConfig.for_model(snake_case_ , **snake_case_ ) _a = reduce_loss _a = label_smoothing _a = exclude_bos_score _a = do_marginalize _a = title_sep _a = doc_sep _a = n_docs _a = max_combined_length _a = dataset _a = dataset_split _a = index_name _a = retrieval_vector_size _a = retrieval_batch_size _a = passages_path _a = index_path _a = use_dummy_dataset _a = output_retrieved _a = do_deduplication _a = use_cache if self.forced_eos_token_id is None: _a = getattr(self.generator , "forced_eos_token_id" , snake_case_ ) @classmethod def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig: return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ ) def __lowerCAmelCase ( self ) -> Optional[int]: _a = copy.deepcopy(self.__dict__ ) _a = self.question_encoder.to_dict() _a = self.generator.to_dict() _a = self.__class__.model_type return output
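RAG's configuration is composed from two sub-configurations, which is what the classmethod near the bottom of the file wires together. A plausible construction sketch, using the public from_question_encoder_generator_configs name that this obfuscated copy renames, with checkpoint identifiers chosen only for illustration:

from transformers import AutoConfig, RagConfig

# assumed example checkpoints for the two halves of the model
question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config,
    generator_config,
    n_docs=5,                  # number of retrieved passages, as documented above
    retrieval_vector_size=768,
)
print(rag_config.generator.model_type)  # "bart"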
691
1