code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = sorted(numsa + numsa ) A, A : Dict = divmod(len(UpperCamelCase_ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() lowercase : List[str] = [float(x) for x in input('Enter the elements of first array: ').split()] lowercase : int = [float(x) for x in input('Enter the elements of second array: ').split()] print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
3
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool: '''simple docstring''' return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool: '''simple docstring''' # Base Case if index == len(UpperCamelCase_ ): return True # Recursive Step for i in range(UpperCamelCase_ ): if valid_coloring(graph[index] , UpperCamelCase_ , UpperCamelCase_ ): # Color current vertex UpperCamelCase = i # Validate coloring if util_color(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index + 1 ): return True # Backtrack UpperCamelCase = -1 return False def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> list[int]: '''simple docstring''' UpperCamelCase = [-1] * len(UpperCamelCase_ ) if util_color(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , 0 ): return colored_vertices return []
343
0
'''simple docstring''' from jiwer import compute_measures import datasets __lowercase : List[str] = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' __lowercase : Any = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' __lowercase : int = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = [\"this is the prediction\", \"there is an other sample\"] >>> references = [\"this is the reference\", \"there is another one\"] >>> wer = datasets.load_metric(\"wer\") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def UpperCAmelCase__ (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', ] , ) def UpperCAmelCase__ (self , A=None , A=None , A=False ): if concatenate_texts: return compute_measures(lowerCamelCase_ , lowerCamelCase_ )["wer"] else: lowerCamelCase_ : Tuple = 0 lowerCamelCase_ : Dict = 0 for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_ ): lowerCamelCase_ : Optional[int] = compute_measures(lowerCamelCase_ , lowerCamelCase_ ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
318
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
343
0
from typing import Any import numpy as np def __lowerCAmelCase ( a__ ) -> bool: return np.array_equal(UpperCamelCase_ , matrix.conjugate().T ) def __lowerCAmelCase ( a__ , a__ ) -> Any: __a = v.conjugate().T __a = v_star.dot(UpperCamelCase_ ) assert isinstance(UpperCamelCase_ , np.ndarray ) return (v_star_dot.dot(UpperCamelCase_ )) / (v_star.dot(UpperCamelCase_ )) def __lowerCAmelCase ( ) -> None: __a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) __a = np.array([[1], [2], [3]] ) assert is_hermitian(UpperCamelCase_ ), F"""{a} is not hermitian.""" print(rayleigh_quotient(UpperCamelCase_ , UpperCamelCase_ ) ) __a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(UpperCamelCase_ ), F"""{a} is not hermitian.""" assert rayleigh_quotient(UpperCamelCase_ , UpperCamelCase_ ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
6
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _SCREAMING_SNAKE_CASE = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", False), ("""AS 3S 4S 8S 2S""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]), ("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 2_3), ("""JH 9H TH KH QH""", 2_2), ("""JC KH JS JD JH""", 2_1), ("""KH KC 3S 3H 3D""", 2_0), ("""8C 9C 5C 3C TC""", 1_9), ("""JS QS 9H TS KH""", 1_8), ("""7C 7S KH 2H 7H""", 1_7), ("""3C KH 5D 5S KH""", 1_6), ("""QH 8H KD JH 8S""", 1_5), ("""2D 6D 9D TH 7D""", 1_4), ) def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) ) UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase( UpperCamelCase_ = 100 ) -> List[Any]: '''simple docstring''' return (generate_random_hand() for _ in range(UpperCamelCase_ )) @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: '''simple docstring''' UpperCamelCase = PokerHand(UpperCamelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(UpperCamelCase_ ) UpperCamelCase = chain(sorted(UpperCamelCase_ ) ) for index, hand in enumerate(UpperCamelCase_ ): assert hand == poker_hands[index] def lowercase( ) -> Union[str, Any]: '''simple docstring''' # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=UpperCamelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase( ) -> str: '''simple docstring''' # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase( ) -> int: '''simple docstring''' # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" ) with open(UpperCamelCase_ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ ) UpperCamelCase = player.compare_with(UpperCamelCase_ ) if output == "Win": answer += 1 assert answer == 376
343
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : """simple docstring""" @staticmethod def UpperCamelCase__ ( *__a : int , **__a : Optional[Any] ): pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" __a =MODEL_FOR_OBJECT_DETECTION_MAPPING def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[int] , __a : int , __a : Union[str, Any] ): _a = ObjectDetectionPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def UpperCamelCase__ ( self : Dict , __a : str , __a : Union[str, Any] ): _a = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(lowerCamelCase_ ) , 0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase_ , { "score": ANY(lowerCamelCase_ ), "label": ANY(lowerCamelCase_ ), "box": {"xmin": ANY(lowerCamelCase_ ), "ymin": ANY(lowerCamelCase_ ), "xmax": ANY(lowerCamelCase_ ), "ymax": ANY(lowerCamelCase_ )}, } , ) import datasets _a = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _a = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] _a = object_detector(lowerCamelCase_ , threshold=0.0 ) self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for outputs in batch_outputs: self.assertGreater(len(lowerCamelCase_ ) , 0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase_ , { "score": ANY(lowerCamelCase_ ), "label": ANY(lowerCamelCase_ ), "box": {"xmin": ANY(lowerCamelCase_ ), "ymin": ANY(lowerCamelCase_ ), "xmax": ANY(lowerCamelCase_ ), "ymax": ANY(lowerCamelCase_ )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def UpperCamelCase__ ( self : List[str] ): pass @require_torch def UpperCamelCase__ ( self : List[str] ): _a = "hf-internal-testing/tiny-detr-mobilenetsv3" _a = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ ) _a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) _a = ObjectDetectionPipeline(model=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) _a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, ] , ) _a = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}}, ], ] , ) @require_torch @slow def UpperCamelCase__ ( self : List[Any] ): _a = "facebook/detr-resnet-50" _a = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ ) _a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) _a = ObjectDetectionPipeline(model=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) _a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ] , ) _a = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], ] , ) @require_torch @slow def UpperCamelCase__ ( self : Optional[int] ): _a = "facebook/detr-resnet-50" _a = pipeline("object-detection" , model=lowerCamelCase_ ) _a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ] , ) _a = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ], ] , ) @require_torch @slow def UpperCamelCase__ ( self : Union[str, Any] ): _a = 0.9985 _a = "facebook/detr-resnet-50" _a = pipeline("object-detection" , model=lowerCamelCase_ ) _a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowerCamelCase_ ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}}, ] , ) @require_torch @require_pytesseract @slow def UpperCamelCase__ ( self : str ): _a = "Narsil/layoutlmv3-finetuned-funsd" _a = 0.9993 _a = pipeline("object-detection" , model=lowerCamelCase_ , threshold=lowerCamelCase_ ) _a = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}}, ] , )
63
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """xlnet""" __lowerCAmelCase = ["""mems"""] __lowerCAmelCase = { """n_token""": """vocab_size""", # Backward compatibility """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , lowerCamelCase_ : Any=3_2000 , lowerCamelCase_ : Dict=1024 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : List[Any]=4096 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Union[str, Any]="bi" , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=1E-12 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Union[str, Any]=512 , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Union[str, Any]="last" , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : str="tanh" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Dict=5 , lowerCamelCase_ : str=5 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : int=2 , **lowerCamelCase_ : List[Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = n_layer UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) UpperCamelCase = d_model // n_head UpperCamelCase = ff_activation UpperCamelCase = d_inner UpperCamelCase = untie_r UpperCamelCase = attn_type UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = dropout UpperCamelCase = mem_len UpperCamelCase = reuse_len UpperCamelCase = bi_data UpperCamelCase = clamp_len UpperCamelCase = same_length UpperCamelCase = summary_type UpperCamelCase = summary_use_proj UpperCamelCase = summary_activation UpperCamelCase = summary_last_dropout UpperCamelCase = start_n_top UpperCamelCase = end_n_top UpperCamelCase = bos_token_id UpperCamelCase = pad_token_id UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`""" """ instead.""" , lowerCamelCase_ , ) UpperCamelCase = kwargs["""use_cache"""] UpperCamelCase = use_mems_eval UpperCamelCase = use_mems_train super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
343
0
'''simple docstring''' import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __lowerCAmelCase ,unittest.TestCase ): __lowerCamelCase : Any = AudioLDMPipeline __lowerCamelCase : Union[str, Any] = TEXT_TO_AUDIO_PARAMS __lowerCamelCase : Dict = TEXT_TO_AUDIO_BATCH_PARAMS __lowerCamelCase : List[Any] = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def _snake_case ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCamelCase_ , ) _lowerCAmelCase = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) _lowerCAmelCase = ClapTextModelWithProjection(lowerCamelCase_ ) _lowerCAmelCase = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 ) _lowerCAmelCase = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCamelCase_ , ) _lowerCAmelCase = SpeechTaHifiGan(lowerCamelCase_ ) _lowerCAmelCase = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Any: if str(lowerCamelCase_ ).startswith("mps" ): _lowerCAmelCase = torch.manual_seed(lowerCamelCase_ ) else: _lowerCAmelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) _lowerCAmelCase = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def _snake_case ( self ) -> int: _lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase_ ) == 256 _lowerCAmelCase = audio[:10] _lowerCAmelCase = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def _snake_case ( self ) -> Optional[int]: _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ ) _lowerCAmelCase = 3 * [inputs["prompt"]] # forward _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ ) _lowerCAmelCase = 3 * [inputs.pop("prompt" )] _lowerCAmelCase = audioldm_pipe.tokenizer( lowerCamelCase_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="pt" , ) _lowerCAmelCase = text_inputs["input_ids"].to(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.text_encoder( lowerCamelCase_ , ) _lowerCAmelCase = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state _lowerCAmelCase = F.normalize(lowerCamelCase_ , dim=-1 ) _lowerCAmelCase = prompt_embeds # forward _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ ) _lowerCAmelCase = 3 * ["this is a negative prompt"] _lowerCAmelCase = negative_prompt _lowerCAmelCase = 3 * [inputs["prompt"]] # forward _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ ) _lowerCAmelCase = 3 * [inputs.pop("prompt" )] _lowerCAmelCase = [] for p in [prompt, negative_prompt]: _lowerCAmelCase = audioldm_pipe.tokenizer( lowerCamelCase_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="pt" , ) _lowerCAmelCase = text_inputs["input_ids"].to(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.text_encoder( lowerCamelCase_ , ) _lowerCAmelCase = text_embeds.text_embeds # additional L_2 normalization over each hidden-state _lowerCAmelCase = F.normalize(lowerCamelCase_ , dim=-1 ) embeds.append(lowerCamelCase_ ) _lowerCAmelCase , _lowerCAmelCase = embeds # forward _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def _snake_case ( self ) -> int: _lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ ) _lowerCAmelCase = "egg cracking" _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ , negative_prompt=lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase_ ) == 256 _lowerCAmelCase = audio[:10] _lowerCAmelCase = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def _snake_case ( self ) -> str: _lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_ ) _lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) _lowerCAmelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts _lowerCAmelCase = 2 _lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt _lowerCAmelCase = 2 _lowerCAmelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts _lowerCAmelCase = 2 _lowerCAmelCase = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _snake_case ( self ) -> int: _lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate _lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase_ ) / vocoder_sampling_rate == 0.016 _lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **lowerCamelCase_ ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(lowerCamelCase_ ) / vocoder_sampling_rate == 0.032 def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = ["hey"] _lowerCAmelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=1 ) _lowerCAmelCase = output.audios.shape assert audio_shape == (1, 256) _lowerCAmelCase = audioldm_pipe.vocoder.config config.model_in_dim *= 2 _lowerCAmelCase = SpeechTaHifiGan(lowerCamelCase_ ).to(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=1 ) _lowerCAmelCase = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _snake_case ( self ) -> List[str]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase_ ) def _snake_case ( self ) -> Optional[Any]: self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase_ ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _snake_case ( self ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase="cpu" , _lowerCAmelCase=torch.floataa , _lowerCAmelCase=0 ) -> Dict: _lowerCAmelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) _lowerCAmelCase = np.random.RandomState(lowerCamelCase_ ).standard_normal((1, 8, 128, 16) ) _lowerCAmelCase = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ , dtype=lowerCamelCase_ ) _lowerCAmelCase = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def _snake_case ( self ) -> str: _lowerCAmelCase = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = self.get_inputs(lowerCamelCase_ ) _lowerCAmelCase = 25 _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ ).audios[0] assert audio.ndim == 1 assert len(lowerCamelCase_ ) == 81920 _lowerCAmelCase = audio[77230:77240] _lowerCAmelCase = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) _lowerCAmelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def _snake_case ( self ) -> Dict: _lowerCAmelCase = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) _lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) _lowerCAmelCase = audioldm_pipe.to(lowerCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) _lowerCAmelCase = self.get_inputs(lowerCamelCase_ ) _lowerCAmelCase = audioldm_pipe(**lowerCamelCase_ ).audios[0] assert audio.ndim == 1 assert len(lowerCamelCase_ ) == 81920 _lowerCAmelCase = audio[27780:27790] _lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) _lowerCAmelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
158
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 0 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : Any ): """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = True try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
343
0
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def _UpperCAmelCase ( self : List[Any]): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = ort.SessionOptions() lowercase_ = False return options def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""") lowercase_ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""") lowercase_ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""") # using the PNDM scheduler by default lowercase_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase_) lowercase_ = """A red cat sitting on a park bench""" lowercase_ = np.random.RandomState(0) lowercase_ = pipe( prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=lowerCamelCase_ , output_type="""np""" , ) lowercase_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image).max() < 1E-2
136
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X2_0000 and cp <= 0X2_A6DF) # or (cp >= 0X2_A700 and cp <= 0X2_B73F) # or (cp >= 0X2_B740 and cp <= 0X2_B81F) # or (cp >= 0X2_B820 and cp <= 0X2_CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2_F800 and cp <= 0X2_FA1F) # ): # return True return False def lowercase( UpperCamelCase_ ) -> Dict: '''simple docstring''' # word like '180' or '身高' or '神' for char in word: UpperCamelCase = ord(UpperCamelCase_ ) if not _is_chinese_char(UpperCamelCase_ ): return 0 return 1 def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' UpperCamelCase = set() for token in tokens: UpperCamelCase = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ ) if chinese_word: word_set.add(UpperCamelCase_ ) UpperCamelCase = list(UpperCamelCase_ ) return word_list def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' if not chinese_word_set: return bert_tokens UpperCamelCase = max([len(UpperCamelCase_ ) for w in chinese_word_set] ) UpperCamelCase = bert_tokens UpperCamelCase , UpperCamelCase = 0, len(UpperCamelCase_ ) while start < end: UpperCamelCase = True if is_chinese(bert_word[start] ): UpperCamelCase = min(end - start , UpperCamelCase_ ) for i in range(UpperCamelCase_ , 1 , -1 ): UpperCamelCase = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): UpperCamelCase = """##""" + bert_word[j] UpperCamelCase = start + i UpperCamelCase = False break if single_word: start += 1 return bert_word def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str: '''simple docstring''' UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0] UpperCamelCase = [get_chinese_word(UpperCamelCase_ ) for r in res] ltp_res.extend(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase = [] for id in input_ids: UpperCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase_ ) input_tokens.append(UpperCamelCase_ ) UpperCamelCase = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(UpperCamelCase_ ): if token[:2] == "##": UpperCamelCase = token[2:] # save chinese tokens' pos if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ): ref_id.append(UpperCamelCase_ ) ref_ids.append(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) return ref_ids def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: UpperCamelCase = f.readlines() UpperCamelCase = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' UpperCamelCase = LTP(args.ltp ) # faster in GPU device UpperCamelCase = BertTokenizer.from_pretrained(args.bert ) UpperCamelCase = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: UpperCamelCase = [json.dumps(UpperCamelCase_ ) + """\n""" for ref in ref_ids] f.writelines(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""" ) parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""") parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""") _SCREAMING_SNAKE_CASE = parser.parse_args() main(args)
343
0
"""simple docstring""" def _snake_case ( _snake_case : Optional[int] = 10_00 ) -> int: '''simple docstring''' _A = 3 _A = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
315
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 1 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (self.image_size, self.image_size) UpperCamelCase = (self.patch_size, self.patch_size) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxViTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) UpperCamelCase = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase_ )
343
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
199
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ : def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = range_bbox def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase = bbox[i, j, 3] UpperCamelCase = bbox[i, j, 1] UpperCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase = bbox[i, j, 2] UpperCamelCase = bbox[i, j, 0] UpperCamelCase = t UpperCamelCase = None if self.use_input_mask: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __lowerCAmelCase = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ): """simple docstring""" return True def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = LiltModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ ) UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ ) UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ ) UpperCamelCase = torch.Size([1, 2, 768] ) UpperCamelCase = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
343
0
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) SCREAMING_SNAKE_CASE : Any = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE : Any = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE : Any = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) SCREAMING_SNAKE_CASE : Dict = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) SCREAMING_SNAKE_CASE : str = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) SCREAMING_SNAKE_CASE : List[Any] = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) SCREAMING_SNAKE_CASE : Any = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) SCREAMING_SNAKE_CASE : str = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) SCREAMING_SNAKE_CASE : str = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE : List[Any] = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) SCREAMING_SNAKE_CASE : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE : str = auto_class_update(FlaxAutoModel) class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE : List[str] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE : int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE : Dict = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE : str = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE : str = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE : Tuple = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class _UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ =FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
102
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ): """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 30} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize_and_center_crop UpperCamelCase = size UpperCamelCase = crop_pct UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = PoolFormerImageProcessingTester(self ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
343
0
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def UpperCamelCase_( lowerCamelCase_ ) -> Dict: # word like '180' or '身高' or '神' for char in word: _lowercase : int = ord(UpperCamelCase_ ) if not _is_chinese_char(UpperCamelCase_ ): return 0 return 1 def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]: _lowercase : Tuple = set() for token in tokens: _lowercase : Tuple = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ ) if chinese_word: word_set.add(UpperCamelCase_ ) _lowercase : Dict = list(UpperCamelCase_ ) return word_list def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: if not chinese_word_set: return bert_tokens _lowercase : Any = max([len(UpperCamelCase_ ) for w in chinese_word_set] ) _lowercase : str = bert_tokens _lowercase , _lowercase : Any = 0, len(UpperCamelCase_ ) while start < end: _lowercase : Any = True if is_chinese(bert_word[start] ): _lowercase : List[Any] = min(end - start , UpperCamelCase_ ) for i in range(UpperCamelCase_ , 1 , -1 ): _lowercase : int = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _lowercase : Tuple = '##' + bert_word[j] _lowercase : Tuple = start + i _lowercase : List[str] = False break if single_word: start += 1 return bert_word def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str: _lowercase : int = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): _lowercase : Any = ltp_tokenizer.seg(lines[i : i + 100] )[0] _lowercase : Tuple = [get_chinese_word(UpperCamelCase_ ) for r in res] ltp_res.extend(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) _lowercase : int = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): _lowercase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=512 ) bert_res.extend(res['input_ids'] ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) _lowercase : Union[str, Any] = [] for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ): _lowercase : List[str] = [] for id in input_ids: _lowercase : Optional[Any] = bert_tokenizer._convert_id_to_token(UpperCamelCase_ ) input_tokens.append(UpperCamelCase_ ) _lowercase : Union[str, Any] = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ ) _lowercase : List[str] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(UpperCamelCase_ ): if token[:2] == "##": _lowercase : List[str] = token[2:] # save chinese tokens' pos if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ): ref_id.append(UpperCamelCase_ ) ref_ids.append(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) return ref_ids def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , 'r' , encoding='utf-8' ) as f: _lowercase : List[Any] = f.readlines() _lowercase : List[str] = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _lowercase : Optional[Any] = LTP(args.ltp ) # faster in GPU device _lowercase : Optional[Any] = BertTokenizer.from_pretrained(args.bert ) _lowercase : Dict = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: _lowercase : Any = [json.dumps(UpperCamelCase_ ) + '\n' for ref in ref_ids] f.writelines(UpperCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" ) parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer") parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") SCREAMING_SNAKE_CASE : int = parser.parse_args() main(args)
21
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float: '''simple docstring''' if mass < 0: raise ValueError("""The mass of a body cannot be negative""" ) return 0.5 * mass * abs(UpperCamelCase_ ) * abs(UpperCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
343
0
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class UpperCamelCase ( unittest.TestCase ): def __init__(self : List[str] , _A : Union[str, Any] , _A : List[str]=13 , _A : Any=7 , _A : Any=True , _A : List[str]=True , _A : Union[str, Any]=True , _A : Optional[int]=True , _A : List[Any]=99 , _A : Dict=32 , _A : Optional[int]=5 , _A : Tuple=4 , _A : Any=37 , _A : Any="gelu" , _A : Optional[int]=0.1 , _A : Any=0.1 , _A : List[Any]=5_12 , _A : str=16 , _A : Any=2 , _A : int=0.02 , _A : Union[str, Any]=4 , ) -> Optional[Any]: __snake_case : int = parent __snake_case : Optional[Any] = batch_size __snake_case : Optional[int] = seq_length __snake_case : List[str] = is_training __snake_case : Optional[int] = use_attention_mask __snake_case : Optional[int] = use_token_type_ids __snake_case : Dict = use_labels __snake_case : int = vocab_size __snake_case : int = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Optional[Any] = intermediate_size __snake_case : Optional[Any] = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Any = max_position_embeddings __snake_case : Any = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : Tuple = initializer_range __snake_case : Dict = num_choices def _lowercase (self : List[str]) -> Dict: __snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case : str = None if self.use_attention_mask: __snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) __snake_case : Tuple = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __snake_case : List[str] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowercase (self : Tuple) -> List[str]: __snake_case : str = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : int = config_and_inputs __snake_case : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCamelCase ( __lowerCAmelCase , unittest.TestCase ): UpperCAmelCase : Any = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _lowercase (self : List[Any]) -> Tuple: __snake_case : List[str] = FlaxAlbertModelTester(self) @slow def _lowercase (self : str) -> Tuple: for model_class_name in self.all_model_classes: __snake_case : int = model_class_name.from_pretrained('albert-base-v2') __snake_case : Optional[Any] = model(np.ones((1, 1))) self.assertIsNotNone(lowerCamelCase_) @require_flax class UpperCamelCase ( unittest.TestCase ): @slow def _lowercase (self : Dict) -> Tuple: __snake_case : Union[str, Any] = FlaxAlbertModel.from_pretrained('albert-base-v2') __snake_case : Union[str, Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]]) __snake_case : int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) __snake_case : Dict = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0] __snake_case : str = (1, 11, 7_68) self.assertEqual(output.shape , lowerCamelCase_) __snake_case : List[Any] = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]]) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4))
172
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """trocr""" __lowerCAmelCase = ["""past_key_values"""] __lowerCAmelCase = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_ffn_dim UpperCamelCase = activation_function UpperCamelCase = max_position_embeddings UpperCamelCase = dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = init_std UpperCamelCase = decoder_layerdrop UpperCamelCase = use_cache UpperCamelCase = scale_embedding UpperCamelCase = use_learned_position_embeddings UpperCamelCase = layernorm_embedding super().__init__( pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
343
0
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def lowerCAmelCase_ ( snake_case__ , snake_case__=None ): '''simple docstring''' A : Any = None if token is not None: A : Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'} A : Optional[int] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' A : int = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json() A : Tuple = {} try: job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) A : Any = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(UpperCamelCase_ ): A : Any = requests.get(url + F'&page={i + 2}' , headers=UpperCamelCase_ ).json() job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return job_links except Exception: print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def lowerCAmelCase_ ( snake_case__ , snake_case__=None ): '''simple docstring''' A : Optional[Any] = None if token is not None: A : Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'} A : Tuple = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100' A : int = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json() A : str = {} try: artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) A : List[str] = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(UpperCamelCase_ ): A : str = requests.get(url + F'&page={i + 2}' , headers=UpperCamelCase_ ).json() artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) return artifacts except Exception: print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = None if token is not None: A : str = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'} A : Any = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ , allow_redirects=UpperCamelCase_ ) A : Any = result.headers['''Location'''] A : Any = requests.get(UpperCamelCase_ , allow_redirects=UpperCamelCase_ ) A : Union[str, Any] = os.path.join(UpperCamelCase_ , F'{artifact_name}.zip' ) with open(UpperCamelCase_ , '''wb''' ) as fp: fp.write(response.content ) def lowerCAmelCase_ ( snake_case__ , snake_case__=None ): '''simple docstring''' A : Optional[int] = [] A : Dict = [] A : Any = None with zipfile.ZipFile(UpperCamelCase_ ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCamelCase_ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(UpperCamelCase_ ) as f: for line in f: A : List[str] = line.decode('''UTF-8''' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs A : Optional[Any] = line[: line.index(''': ''' )] A : List[Any] = line[line.index(''': ''' ) + len(''': ''' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ): # `test` is the test method that failed A : int = line[len('''FAILED ''' ) :] failed_tests.append(UpperCamelCase_ ) elif filename == "job_name.txt": A : Optional[Any] = line if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F'`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCamelCase_ )} for `errors` ' F'and {len(UpperCamelCase_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some' ''' problem.''' ) A : List[str] = None if job_name and job_links: A : Union[str, Any] = job_links.get(UpperCamelCase_ , UpperCamelCase_ ) # A list with elements of the form (line of error, error, failed test) A : str = [x + [y] + [job_link] for x, y in zip(UpperCamelCase_ , UpperCamelCase_ )] return result def lowerCAmelCase_ ( snake_case__ , snake_case__=None ): '''simple docstring''' A : Optional[Any] = [] A : Optional[Any] = [os.path.join(UpperCamelCase_ , UpperCamelCase_ ) for p in os.listdir(UpperCamelCase_ ) if p.endswith('''.zip''' )] for p in paths: errors.extend(get_errors_from_single_artifact(UpperCamelCase_ , job_links=UpperCamelCase_ ) ) return errors def lowerCAmelCase_ ( snake_case__ , snake_case__=None ): '''simple docstring''' A : Optional[int] = Counter() counter.update([x[1] for x in logs] ) A : int = counter.most_common() A : List[str] = {} for error, count in counts: if error_filter is None or error not in error_filter: A : List[str] = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]} A : Tuple = dict(sorted(r.items() , key=lambda snake_case__ : item[1]["count"] , reverse=UpperCamelCase_ ) ) return r def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[Any] = test.split('''::''' )[0] if test.startswith('''tests/models/''' ): A : Tuple = test.split('''/''' )[2] else: A : Any = None return test def lowerCAmelCase_ ( snake_case__ , snake_case__=None ): '''simple docstring''' A : Union[str, Any] = [(x[0], x[1], get_model(x[2] )) for x in logs] A : List[str] = [x for x in logs if x[2] is not None] A : Union[str, Any] = {x[2] for x in logs} A : Optional[int] = {} for test in tests: A : Union[str, Any] = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) A : List[str] = counter.most_common() A : Tuple = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} A : int = sum(error_counts.values() ) if n_errors > 0: A : Union[str, Any] = {'''count''': n_errors, '''errors''': error_counts} A : Union[str, Any] = dict(sorted(r.items() , key=lambda snake_case__ : item[1]["count"] , reverse=UpperCamelCase_ ) ) return r def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = '''| no. | error | status |''' A : Dict = '''|-:|:-|:-|''' A : Tuple = [header, sep] for error in reduced_by_error: A : Optional[int] = reduced_by_error[error]['''count'''] A : Optional[Any] = F'| {count} | {error[:100]} | |' lines.append(UpperCamelCase_ ) return "\n".join(UpperCamelCase_ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = '''| model | no. of errors | major error | count |''' A : List[str] = '''|-:|-:|-:|-:|''' A : int = [header, sep] for model in reduced_by_model: A : Optional[int] = reduced_by_model[model]['''count'''] A, A : List[str] = list(reduced_by_model[model]['''errors'''].items() )[0] A : str = F'| {model} | {count} | {error[:60]} | {_count} |' lines.append(UpperCamelCase_ ) return "\n".join(UpperCamelCase_ ) if __name__ == "__main__": lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') lowercase : Optional[int] = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) lowercase : List[Any] = get_job_links(args.workflow_run_id, token=args.token) lowercase : List[str] = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: lowercase : int = k.find(' / ') lowercase : str = k[index + len(' / ') :] lowercase : Union[str, Any] = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) lowercase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) lowercase : Optional[Any] = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error lowercase : Optional[int] = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors lowercase : Any = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) lowercase : Dict = reduce_by_error(errors) lowercase : str = reduce_by_model(errors) lowercase : str = make_github_table(reduced_by_error) lowercase : Optional[Any] = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
3
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ): __lowerCAmelCase = """swin""" __lowerCAmelCase = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )] UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names ) class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return 1E-4
343
0
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input __lowercase : int = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def lowercase_ ( ) -> Any: '''simple docstring''' lowerCamelCase_ : List[Any] = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: lowerCamelCase_ : List[str] = get_sagemaker_input() else: lowerCamelCase_ : Optional[int] = get_cluster_input() return config def lowercase_ ( _lowercase=None ) -> Dict: '''simple docstring''' if subparsers is not None: lowerCamelCase_ : List[str] = subparsers.add_parser('''config''' , description=UpperCamelCase_ ) else: lowerCamelCase_ : str = argparse.ArgumentParser('''Accelerate config command''' , description=UpperCamelCase_ ) parser.add_argument( '''--config_file''' , default=UpperCamelCase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=UpperCamelCase_ ) return parser def lowercase_ ( _lowercase ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ : List[str] = get_user_input() if args.config_file is not None: lowerCamelCase_ : List[str] = args.config_file else: if not os.path.isdir(UpperCamelCase_ ): os.makedirs(UpperCamelCase_ ) lowerCamelCase_ : List[Any] = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(UpperCamelCase_ ) else: config.to_yaml_file(UpperCamelCase_ ) print(F"""accelerate configuration saved at {config_file}""" ) def lowercase_ ( ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ : List[Any] = config_command_parser() lowerCamelCase_ : List[str] = parser.parse_args() config_command(UpperCamelCase_ ) if __name__ == "__main__": main()
318
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _SCREAMING_SNAKE_CASE = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_2_8, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5) _SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) _SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img( """dataset/single_prediction/image.png""", target_size=(6_4, 6_4) ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image) _SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0) _SCREAMING_SNAKE_CASE = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _SCREAMING_SNAKE_CASE = """Normal""" if result[0][0] == 1: _SCREAMING_SNAKE_CASE = """Abnormality detected"""
343
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Optional[Any] = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[int] = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
6
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): pass class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = data UpperCamelCase = None def __iter__( self : Optional[int] ): """simple docstring""" UpperCamelCase = self UpperCamelCase = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCamelCase_ ) yield node.data UpperCamelCase = node.next_node @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": _SCREAMING_SNAKE_CASE = Node(1) _SCREAMING_SNAKE_CASE = Node(2) _SCREAMING_SNAKE_CASE = Node(3) _SCREAMING_SNAKE_CASE = Node(4) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = root_node.next_node print(root_node.has_loop) # True _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = Node(1) print(root_node.has_loop) # False
343
0
'''simple docstring''' import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ : str = 'bert-base-cased' lowerCAmelCase_ : List[str] = 'google/pegasus-xsum' lowerCAmelCase_ : Optional[Any] = [' Sam ate lunch today.', 'Sams lunch ingredients.'] lowerCAmelCase_ : int = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] lowerCAmelCase_ : int = 'patrickvonplaten/t5-tiny-random' lowerCAmelCase_ : Tuple = 'sshleifer/bart-tiny-random' lowerCAmelCase_ : str = 'sshleifer/tiny-mbart' lowerCAmelCase_ : int = 'sshleifer/tiny-marian-en-de' def _lowerCamelCase ( lowercase : int , lowercase : str ) -> List[str]: _a = "\n".join(UpperCamelCase_ ) Path(UpperCamelCase_ ).open("w" ).writelines(UpperCamelCase_ ) def _lowerCamelCase ( lowercase : str ) -> List[str]: for split in ["train", "val", "test"]: _dump_articles(os.path.join(UpperCamelCase_ , F'{split}.source' ) , UpperCamelCase_ ) _dump_articles(os.path.join(UpperCamelCase_ , F'{split}.target' ) , UpperCamelCase_ ) return tmp_dir class __SCREAMING_SNAKE_CASE (__lowerCAmelCase ): """simple docstring""" @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def UpperCamelCase__ ( self : Optional[int] , __a : Tuple ): _a = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _a = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) _a = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES ) _a = 4 _a = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated _a , _a = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. _a = SeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path="train" , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , ) _a = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place _a = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def UpperCamelCase__ ( self : Dict , __a : Tuple ): _a = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _a = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES ) _a = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES ) _a = 4 _a = LegacySeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path="train" , max_source_length=20 , max_target_length=lowerCamelCase_ , ) _a = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def UpperCamelCase__ ( self : Optional[Any] ): _a = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) _a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) _a = tmp_dir.joinpath("train.source" ).open().readlines() _a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ ) _a = {x.name for x in tmp_dir.iterdir()} _a = {x.name for x in save_dir.iterdir()} _a = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowerCamelCase_ ) < len(lowerCamelCase_ ) assert len(lowerCamelCase_ ) == 1 assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def UpperCamelCase__ ( self : Union[str, Any] ): if not FAIRSEQ_AVAILABLE: return _a , _a , _a = self._get_dataset(max_len=64 ) _a = 64 _a = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ ) _a = [len(lowerCamelCase_ ) for x in batch_sampler] assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples _a = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) _a = [] _a = [] for batch in data_loader: _a = batch["input_ids"].shape _a = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple _a = np.product(batch["input_ids"].shape ) num_src_per_batch.append(lowerCamelCase_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(lowerCamelCase_ ) assert num_src_per_batch[0] == max(lowerCamelCase_ ) if failures: raise AssertionError(f'too many tokens in {len(lowerCamelCase_ )} batches' ) def UpperCamelCase__ ( self : Any ): _a , _a , _a = self._get_dataset(max_len=5_12 ) _a = 2 _a = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ ) _a = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) _a = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ ) _a = tokenizer.pad_token_id def count_pad_tokens(__a : List[Any] , __a : str="input_ids" ): return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowerCamelCase_ , k="labels" ) ) < sum(count_pad_tokens(lowerCamelCase_ , k="labels" ) ) assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) def UpperCamelCase__ ( self : Optional[int] , __a : Union[str, Any]=10_00 , __a : Optional[Any]=1_28 ): if os.getenv("USE_REAL_DATA" , lowerCamelCase_ ): _a = "examples/seq2seq/wmt_en_ro" _a = max_len * 2 * 64 if not Path(lowerCamelCase_ ).joinpath("train.len" ).exists(): save_len_file(lowerCamelCase_ , lowerCamelCase_ ) else: _a = "examples/seq2seq/test_data/wmt_en_ro" _a = max_len * 4 save_len_file(lowerCamelCase_ , lowerCamelCase_ ) _a = AutoTokenizer.from_pretrained(lowerCamelCase_ ) _a = SeqaSeqDataset( lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path="train" , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , ) return ds, max_tokens, tokenizer def UpperCamelCase__ ( self : Optional[Any] ): _a , _a , _a = self._get_dataset() _a = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) ) _a = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) ) assert idsa.intersection(lowerCamelCase_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def UpperCamelCase__ ( self : Tuple , __a : str ): _a = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ ) if tok_name == MBART_TINY: _a = SeqaSeqDataset( lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) _a = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: _a = SeqaSeqDataset( lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) _a = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
63
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , ) assert hasattr(self , """env""" ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = { """enabled""": True, """processes_per_host""": 8, } UpperCamelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
343
0
'''simple docstring''' from __future__ import annotations def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' create_state_space_tree(UpperCamelCase_ , [] , 0 , [0 for i in range(len(UpperCamelCase_ ) )] ) def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , ): '''simple docstring''' if index == len(UpperCamelCase_ ): print(UpperCamelCase_ ) return for i in range(len(UpperCamelCase_ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) _lowerCAmelCase = True create_state_space_tree(UpperCamelCase_ , UpperCamelCase_ , index + 1 , UpperCamelCase_ ) current_sequence.pop() _lowerCAmelCase = False _SCREAMING_SNAKE_CASE = [3, 1, 2, 4] generate_all_permutations(sequence) _SCREAMING_SNAKE_CASE = ["A", "B", "C"] generate_all_permutations(sequence_a)
158
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
343
0
"""simple docstring""" import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=1_3 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Any=9_9 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=5_1_2 , lowerCAmelCase_ : Tuple=1_6 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : Tuple=4 , ): """simple docstring""" lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_attention_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_choices def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase_ = None if self.use_attention_mask: lowercase_ = random_attention_mask([self.batch_size, self.seq_length]) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase_ = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs lowercase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs lowercase_ = True lowercase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , unittest.TestCase ): lowercase__ = True lowercase__ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self : Any): """simple docstring""" lowercase_ = FlaxRobertaPreLayerNormModelTester(self) @slow def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" for model_class_name in self.all_model_classes: lowercase_ = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_) lowercase_ = model(np.ones((1, 1))) self.assertIsNotNone(lowerCamelCase_) @require_flax class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def _UpperCAmelCase ( self : str): """simple docstring""" lowercase_ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_) lowercase_ = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) lowercase_ = model(lowerCamelCase_)[0] lowercase_ = [1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape) , lowerCamelCase_) # compare the actual values for a slice. lowercase_ = np.array( [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_367], [-1.8_877, -4.0_885, 1_0.7_0_8_5], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4)) @slow def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_) lowercase_ = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) lowercase_ = model(lowerCamelCase_)[0] # compare the actual values for a slice. lowercase_ = np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4))
136
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ShapEPipeline __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __lowerCAmelCase = False @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return 8 @property def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(lowerCamelCase_ ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCamelCase = PriorTransformer(**lowerCamelCase_ ) return model @property def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**lowerCamelCase_ ) return model def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , ) UpperCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ): """simple docstring""" if str(lowerCamelCase_ ).startswith("""mps""" ): UpperCamelCase = torch.manual_seed(lowerCamelCase_ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) UpperCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = """cpu""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self : Tuple ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch_device == """cpu""" UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCamelCase = pipe( """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
343
0
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=lowerCamelCase_ , ) assert hasattr(self , 'env' ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Dict=1 ): return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str ): TrainingJobAnalytics(lowerCamelCase_ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.create_estimator() # run training estimator.fit() # result dataframe _A = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) _A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , lowerCamelCase_ )
315
from __future__ import annotations def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> list: '''simple docstring''' UpperCamelCase = [] UpperCamelCase , UpperCamelCase = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) UpperCamelCase = result + left + right return input_list def lowercase( UpperCamelCase_ ) -> list: '''simple docstring''' if len(UpperCamelCase_ ) <= 1: return input_list UpperCamelCase = list(UpperCamelCase_ ) # iteration for two-way merging UpperCamelCase = 2 while p <= len(UpperCamelCase_ ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = i + p - 1 UpperCamelCase = (low + high + 1) // 2 UpperCamelCase = merge(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # final merge of last two parts if p * 2 >= len(UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = merge(UpperCamelCase_ , 0 , UpperCamelCase_ , len(UpperCamelCase_ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip() if user_input == "": _SCREAMING_SNAKE_CASE = [] else: _SCREAMING_SNAKE_CASE = [int(item.strip()) for item in user_input.split(""",""")] print(iter_merge_sort(unsorted))
343
0
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( __lowerCAmelCase , unittest.TestCase ): UpperCamelCase__ : int =MgpstrTokenizer UpperCamelCase__ : Optional[int] =False UpperCamelCase__ : List[str] ={} UpperCamelCase__ : str =False def lowerCamelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" super().setUp() # fmt: off _lowerCamelCase : Dict =['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on _lowerCamelCase : str =dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) _lowerCamelCase : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + '\n' ) def lowerCamelCase ( self : str , **lowercase_ : int ) -> Tuple: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def lowerCamelCase ( self : List[Any] , lowercase_ : List[Any] ) -> Any: """simple docstring""" _lowerCamelCase : int ='tester' _lowerCamelCase : Dict ='tester' return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.' ) def lowerCamelCase ( self : Tuple ) -> List[str]: """simple docstring""" pass def lowerCamelCase ( self : Any ) -> Dict: """simple docstring""" _lowerCamelCase : Dict =self.get_tokenizers(do_lower_case=lowerCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowerCamelCase : List[Any] ='[SPECIAL_TOKEN]' tokenizer.add_special_tokens({'cls_token': special_token} ) _lowerCamelCase : Optional[int] =tokenizer.encode([special_token] , add_special_tokens=lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , 1 ) _lowerCamelCase : Optional[Any] =tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) self.assertTrue(special_token not in decoded ) def lowerCamelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _lowerCamelCase : Optional[int] =self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _lowerCamelCase , _lowerCamelCase : Dict =self.get_input_output_texts(lowerCamelCase_ ) _lowerCamelCase : Tuple =tokenizer.tokenize(lowerCamelCase_ ) _lowerCamelCase : Tuple =tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) _lowerCamelCase : List[str] =tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) _lowerCamelCase : List[Any] =tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertNotEqual(len(lowerCamelCase_ ) , 0 ) _lowerCamelCase : str =tokenizer.decode(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(text_a.replace(' ' , '' ) , lowerCamelCase_ ) @unittest.skip('MGP-STR tokenizer only handles one sequence.' ) def lowerCamelCase ( self : List[str] ) -> List[str]: """simple docstring""" pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' ) def lowerCamelCase ( self : Optional[Any] ) -> int: """simple docstring""" pass
199
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = num_groups def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = BitModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = BitForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowerCAmelCase = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""Bit does not output attentions""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(config=lowerCamelCase_ ) for name, module in model.named_modules(): if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def lowerCamelCase_ ( self : int ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): UpperCamelCase = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : int ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitBackbone,) if is_torch_available() else () __lowerCAmelCase = BitConfig __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self )
343
0
"""simple docstring""" from math import ceil, sqrt def lowercase ( _snake_case : int = 1_000_000 ) ->int: """simple docstring""" __snake_case : List[Any] = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: __snake_case : List[str] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: __snake_case : Optional[int] = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(F'{solution() = }')
102
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = TFResNetModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __lowerCAmelCase = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = TFResNetModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ): UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Any ): """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # forward pass UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
343
0
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _lowerCamelCase( __lowerCAmelCase ): lowercase_ : Dict = """audio-spectrogram-transformer""" def __init__( self, lowerCamelCase=7_68, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=30_72, lowerCamelCase="gelu", lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1E-12, lowerCamelCase=16, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=10, lowerCamelCase=10_24, lowerCamelCase=1_28, **lowerCamelCase, ) -> Dict: """simple docstring""" super().__init__(**lowerCamelCase_) _lowercase : List[str] = hidden_size _lowercase : Optional[int] = num_hidden_layers _lowercase : Any = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : int = hidden_act _lowercase : str = hidden_dropout_prob _lowercase : List[Any] = attention_probs_dropout_prob _lowercase : List[Any] = initializer_range _lowercase : Any = layer_norm_eps _lowercase : List[str] = patch_size _lowercase : Dict = qkv_bias _lowercase : int = frequency_stride _lowercase : Dict = time_stride _lowercase : int = max_length _lowercase : Optional[Any] = num_mel_bins
21
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val def lowercase( UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]: '''simple docstring''' UpperCamelCase = """""" if is_panoptic: UpperCamelCase = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCamelCase = """resnet101""" if "dc5" in model_name: UpperCamelCase = True UpperCamelCase = """panoptic""" in model_name if is_panoptic: UpperCamelCase = 250 else: UpperCamelCase = 91 UpperCamelCase = """huggingface/label-files""" UpperCamelCase = """coco-detection-id2label.json""" UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} # load image processor UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection""" UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ ) # prepare image UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) UpperCamelCase = encoding["""pixel_values"""] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval() UpperCamelCase = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCamelCase = """conditional_detr.""" + src rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val # finally, create HuggingFace model and load state dict UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion UpperCamelCase = conditional_detr(UpperCamelCase_ ) UpperCamelCase = model(UpperCamelCase_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
343
0
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class UpperCamelCase ( unittest.TestCase ): def _lowercase (self : Tuple) -> Optional[Any]: if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=lowerCamelCase_ , ) assert hasattr(self , 'env') def _lowercase (self : Any , _A : List[str]) -> str: __snake_case : List[str] = { 'enabled': True, 'processes_per_host': 8, } __snake_case : Tuple = { 'enabled': True, 'parameters': { 'microbatches': 4, 'placement_strategy': 'spread', 'pipeline': 'interleaved', 'optimize': 'speed', 'partitions': 4, 'ddp': True, }, } __snake_case : Optional[Any] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options} __snake_case : Union[str, Any] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer' # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path, 'max_steps': 5_00, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version='py36' , ) def _lowercase (self : List[str] , _A : List[Any]) -> Dict: TrainingJobAnalytics(lowerCamelCase_).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") @parameterized.expand([(1,)]) def _lowercase (self : Union[str, Any] , _A : int) -> str: __snake_case : Optional[int] = self.create_estimator(lowerCamelCase_) # run training estimator.fit() # result dataframe __snake_case : int = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis __snake_case : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value']) __snake_case : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value']) # get train time from SageMaker job, this includes starting, preprocessing, stopping __snake_case : int = ( Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 99_99_99) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy) assert all(t <= self.results['eval_loss'] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json" , 'w') as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , lowerCamelCase_)
172
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = mask_ratio UpperCamelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) # expected sequence length = num_patches UpperCamelCase = (self.image_size // self.patch_size) ** 2 UpperCamelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) UpperCamelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self : str ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = outputs_dict[0].numpy() UpperCamelCase = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ): UpperCamelCase = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCamelCase_ ): UpperCamelCase = v.numpy() else: UpperCamelCase = np.array(lowerCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.constant(lowerCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase = tf_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCamelCase_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ ) } UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: UpperCamelCase = main_layer_class(lowerCamelCase_ ) UpperCamelCase = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) ) UpperCamelCase = model(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" ) model.save(lowerCamelCase_ ) UpperCamelCase = tf.keras.models.load_model( lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCamelCase_ , tf.keras.Model ) UpperCamelCase = model(lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = outputs.last_hidden_state.numpy() UpperCamelCase = 0 else: UpperCamelCase = outputs.logits.numpy() UpperCamelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ ) UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = after_outputs["""last_hidden_state"""].numpy() UpperCamelCase = 0 else: UpperCamelCase = after_outputs["""logits"""].numpy() UpperCamelCase = 0 UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCamelCase_ ) UpperCamelCase = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCamelCase = model_class.from_config(model.config ) UpperCamelCase = new_model(lowerCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> int: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase = ViTMAEConfig() UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
343
0
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): A : str = F'Input value of [number={number}] must be an integer' raise TypeError(UpperCamelCase_ ) if number < 0: return False A : Optional[Any] = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
3
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool: '''simple docstring''' return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool: '''simple docstring''' # Base Case if index == len(UpperCamelCase_ ): return True # Recursive Step for i in range(UpperCamelCase_ ): if valid_coloring(graph[index] , UpperCamelCase_ , UpperCamelCase_ ): # Color current vertex UpperCamelCase = i # Validate coloring if util_color(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index + 1 ): return True # Backtrack UpperCamelCase = -1 return False def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> list[int]: '''simple docstring''' UpperCamelCase = [-1] * len(UpperCamelCase_ ) if util_color(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , 0 ): return colored_vertices return []
343
0
'''simple docstring''' import math from collections.abc import Callable def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> float: '''simple docstring''' lowerCamelCase_ : Dict = xa lowerCamelCase_ : Tuple = xa while True: if x_n == x_na or function(UpperCamelCase_ ) == function(UpperCamelCase_ ): raise ZeroDivisionError('''float division by zero, could not find root''' ) lowerCamelCase_ : List[str] = x_na - ( function(UpperCamelCase_ ) / ((function(UpperCamelCase_ ) - function(UpperCamelCase_ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na lowerCamelCase_ : List[str] = x_na lowerCamelCase_ : str = x_na def lowercase_ ( _lowercase ) -> float: '''simple docstring''' return math.pow(UpperCamelCase_ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
318
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
343
0
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 A : Dict = get_tests_dir('fixtures') A : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') A : List[str] = get_tests_dir('fixtures/dummy-config.json') class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = 0 def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally __a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop('''feature_extractor_type''' ) __a = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) __a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved __a = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): __a = AutoFeatureExtractor.from_pretrained('''bert-base''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision='''aaaaaa''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex( lowerCamelCase_ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): __a = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' with self.assertRaises(lowerCamelCase_ ): __a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase_ ): __a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) __a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) __a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' try: AutoConfig.register('''custom''' , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API __a = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) __a = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]: '''simple docstring''' class __A( __lowerCAmelCase ): snake_case_ = True try: AutoConfig.register('''custom''' , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local __a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. __a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub __a = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(not hasattr(lowerCamelCase_ , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
6
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _SCREAMING_SNAKE_CASE = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", False), ("""AS 3S 4S 8S 2S""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]), ("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 2_3), ("""JH 9H TH KH QH""", 2_2), ("""JC KH JS JD JH""", 2_1), ("""KH KC 3S 3H 3D""", 2_0), ("""8C 9C 5C 3C TC""", 1_9), ("""JS QS 9H TS KH""", 1_8), ("""7C 7S KH 2H 7H""", 1_7), ("""3C KH 5D 5S KH""", 1_6), ("""QH 8H KD JH 8S""", 1_5), ("""2D 6D 9D TH 7D""", 1_4), ) def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) ) UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase( UpperCamelCase_ = 100 ) -> List[Any]: '''simple docstring''' return (generate_random_hand() for _ in range(UpperCamelCase_ )) @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: '''simple docstring''' UpperCamelCase = PokerHand(UpperCamelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(UpperCamelCase_ ) UpperCamelCase = chain(sorted(UpperCamelCase_ ) ) for index, hand in enumerate(UpperCamelCase_ ): assert hand == poker_hands[index] def lowercase( ) -> Union[str, Any]: '''simple docstring''' # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=UpperCamelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase( ) -> str: '''simple docstring''' # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase( ) -> int: '''simple docstring''' # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" ) with open(UpperCamelCase_ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ ) UpperCamelCase = player.compare_with(UpperCamelCase_ ) if output == "Win": answer += 1 assert answer == 376
343
0
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Tuple , __a : Dict , __a : Optional[int]=13 , __a : Union[str, Any]=3 , __a : List[str]=True , __a : Dict=True , __a : str=0.1 , __a : Union[str, Any]=0.1 , __a : Optional[Any]=2_24 , __a : List[Any]=10_00 , __a : Dict=[3, 3, 6, 4] , __a : Optional[int]=[48, 56, 1_12, 2_20] , ): _a = parent _a = batch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = num_labels _a = image_size _a = layer_depths _a = embed_dims def UpperCamelCase__ ( self : List[str] ): _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.num_labels ) _a = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self : Optional[int] ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCamelCase_ , layer_scale_init_value=1e-5 , ) def UpperCamelCase__ ( self : str , __a : Tuple , __a : Union[str, Any] , __a : Tuple ): _a = SwiftFormerModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _a = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def UpperCamelCase__ ( self : Optional[Any] , __a : List[str] , __a : List[Any] , __a : Dict ): _a = self.num_labels _a = SwiftFormerForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _a = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) _a = SwiftFormerForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self : Dict ): ((_a) , (_a) , (_a)) = self.prepare_config_and_inputs() _a = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): """simple docstring""" __a =(SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () __a =( {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification} if is_torch_available() else {} ) __a =False __a =False __a =False __a =False __a =False def UpperCamelCase__ ( self : Union[str, Any] ): _a = SwiftFormerModelTester(self ) _a = ConfigTester( self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def UpperCamelCase__ ( self : Optional[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds" ) def UpperCamelCase__ ( self : Tuple ): pass def UpperCamelCase__ ( self : Dict ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(lowerCamelCase_ ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(lowerCamelCase_ ) _a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def UpperCamelCase__ ( self : Dict ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def UpperCamelCase__ ( self : int ): _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def UpperCamelCase__ ( self : Tuple ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = SwiftFormerModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @unittest.skip(reason="SwiftFormer does not output attentions" ) def UpperCamelCase__ ( self : Union[str, Any] ): pass def UpperCamelCase__ ( self : List[str] ): def check_hidden_states_output(__a : int , __a : List[str] , __a : int ): _a = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) _a = outputs.hidden_states _a = 8 self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(lowerCamelCase_ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def UpperCamelCase__ ( self : str ): def _config_zero_init(__a : List[Any] ): _a = copy.deepcopy(lowerCamelCase_ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(lowerCamelCase_ , lowerCamelCase_ , 1e-1_0 ) if isinstance(getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ ): _a = _config_zero_init(getattr(lowerCamelCase_ , lowerCamelCase_ ) ) setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return configs_no_init _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = _config_zero_init(lowerCamelCase_ ) for model_class in self.all_model_classes: _a = model_class(config=lowerCamelCase_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def UpperCamelCase__ ( self : Any ): pass def _lowerCamelCase ( ) -> Any: _a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase__ ( self : Union[str, Any] ): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None @slow def UpperCamelCase__ ( self : Optional[int] ): _a = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCamelCase_ ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): _a = model(**lowerCamelCase_ ) # verify the logits _a = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) _a = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
63
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """xlnet""" __lowerCAmelCase = ["""mems"""] __lowerCAmelCase = { """n_token""": """vocab_size""", # Backward compatibility """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , lowerCamelCase_ : Any=3_2000 , lowerCamelCase_ : Dict=1024 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : List[Any]=4096 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Union[str, Any]="bi" , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=1E-12 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Union[str, Any]=512 , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Union[str, Any]="last" , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : str="tanh" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Dict=5 , lowerCamelCase_ : str=5 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : int=2 , **lowerCamelCase_ : List[Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = n_layer UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) UpperCamelCase = d_model // n_head UpperCamelCase = ff_activation UpperCamelCase = d_inner UpperCamelCase = untie_r UpperCamelCase = attn_type UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = dropout UpperCamelCase = mem_len UpperCamelCase = reuse_len UpperCamelCase = bi_data UpperCamelCase = clamp_len UpperCamelCase = same_length UpperCamelCase = summary_type UpperCamelCase = summary_use_proj UpperCamelCase = summary_activation UpperCamelCase = summary_last_dropout UpperCamelCase = start_n_top UpperCamelCase = end_n_top UpperCamelCase = bos_token_id UpperCamelCase = pad_token_id UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`""" """ instead.""" , lowerCamelCase_ , ) UpperCamelCase = kwargs["""use_cache"""] UpperCamelCase = use_mems_eval UpperCamelCase = use_mems_train super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
343
0
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __lowerCAmelCase ): __lowerCamelCase : Any = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = 8 , **_lowerCAmelCase , ) -> Dict: super().__init__(**lowerCamelCase_ ) _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_pad _lowerCAmelCase = pad_size def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase ) -> List[str]: return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None ) -> Any: _lowerCAmelCase , _lowerCAmelCase = get_image_size(lowerCamelCase_ ) _lowerCAmelCase = (old_height // size + 1) * size - old_height _lowerCAmelCase = (old_width // size + 1) * size - old_width return pad(lowerCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=lowerCamelCase_ ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> List[Any]: _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_pad if do_pad is not None else self.do_pad _lowerCAmelCase = pad_size if pad_size is not None else self.pad_size _lowerCAmelCase = make_list_of_images(lowerCamelCase_ ) if not valid_images(lowerCamelCase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(lowerCamelCase_ ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images] if do_pad: _lowerCAmelCase = [self.pad(lowerCamelCase_ , size=lowerCamelCase_ ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
158
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 0 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : Any ): """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = True try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
343
0
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Any: '''simple docstring''' lowercase_ = checkpoints.load_tax_checkpoint(UpperCamelCase_ ) lowercase_ = flatten_dict(UpperCamelCase_ ) return flax_params def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ = {} lowercase_ = { """token_embedder""": """embeddings""", """encoder_norm""": """layernorm""", """kernel""": """weight""", """.out""": """.output""", """scale""": """weight""", """embedders_0.pos_embedding""": """row_embedder.weight""", """embedders_1.pos_embedding""": """column_embedder.weight""", } lowercase_ = { """query""": """attention.query""", """key""": """attention.key""", """value""": """attention.value""", """output.dense""": """output""", """encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""", """pre_self_attention_layer_norm""": """self_attention.layer_norm""", """pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""", """mlp.""": """mlp.DenseReluDense.""", """pre_mlp_layer_norm""": """mlp.layer_norm""", """self_attention.o""": """self_attention.attention.o""", """decoder.embeddings.embedding""": """decoder.embed_tokens.weight""", """decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""", """decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.logits_dense.weight""": """decoder.lm_head.weight""", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowercase_ = """.""".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowercase_ = new_key.replace(UpperCamelCase_ , UpperCamelCase_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowercase_ = new_key.replace(UpperCamelCase_ , UpperCamelCase_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowercase_ = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , UpperCamelCase_ ) lowercase_ = new_key.replace("""encoder""" , """encoder.encoder""" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowercase_ = re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , UpperCamelCase_ ) lowercase_ = flax_dict[key] lowercase_ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowercase_ = torch.from_numpy(converted_dict[key].T ) else: lowercase_ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False ) -> int: '''simple docstring''' lowercase_ = get_flax_param(UpperCamelCase_ ) if not use_large: lowercase_ = PixaStructVisionConfig() lowercase_ = PixaStructTextConfig() else: lowercase_ = PixaStructVisionConfig( hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 ) lowercase_ = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 ) lowercase_ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=UpperCamelCase_ ) lowercase_ = PixaStructForConditionalGeneration(UpperCamelCase_ ) lowercase_ = rename_and_convert_flax_params(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) lowercase_ = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" ) lowercase_ = PixaStructImageProcessor() lowercase_ = PixaStructProcessor(image_processor=UpperCamelCase_ , tokenizer=UpperCamelCase_ ) if use_large: lowercase_ = 40_96 lowercase_ = True # mkdir if needed os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) processor.save_pretrained(UpperCamelCase_ ) print("""Model saved in {}""".format(UpperCamelCase_ ) ) if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--use_large", action="store_true", help="Use large model.") parser.add_argument("--is_vqa", action="store_true", help="Use large model.") UpperCAmelCase : Tuple = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
136
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X2_0000 and cp <= 0X2_A6DF) # or (cp >= 0X2_A700 and cp <= 0X2_B73F) # or (cp >= 0X2_B740 and cp <= 0X2_B81F) # or (cp >= 0X2_B820 and cp <= 0X2_CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2_F800 and cp <= 0X2_FA1F) # ): # return True return False def lowercase( UpperCamelCase_ ) -> Dict: '''simple docstring''' # word like '180' or '身高' or '神' for char in word: UpperCamelCase = ord(UpperCamelCase_ ) if not _is_chinese_char(UpperCamelCase_ ): return 0 return 1 def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' UpperCamelCase = set() for token in tokens: UpperCamelCase = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ ) if chinese_word: word_set.add(UpperCamelCase_ ) UpperCamelCase = list(UpperCamelCase_ ) return word_list def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' if not chinese_word_set: return bert_tokens UpperCamelCase = max([len(UpperCamelCase_ ) for w in chinese_word_set] ) UpperCamelCase = bert_tokens UpperCamelCase , UpperCamelCase = 0, len(UpperCamelCase_ ) while start < end: UpperCamelCase = True if is_chinese(bert_word[start] ): UpperCamelCase = min(end - start , UpperCamelCase_ ) for i in range(UpperCamelCase_ , 1 , -1 ): UpperCamelCase = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): UpperCamelCase = """##""" + bert_word[j] UpperCamelCase = start + i UpperCamelCase = False break if single_word: start += 1 return bert_word def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str: '''simple docstring''' UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0] UpperCamelCase = [get_chinese_word(UpperCamelCase_ ) for r in res] ltp_res.extend(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase = [] for id in input_ids: UpperCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase_ ) input_tokens.append(UpperCamelCase_ ) UpperCamelCase = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(UpperCamelCase_ ): if token[:2] == "##": UpperCamelCase = token[2:] # save chinese tokens' pos if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ): ref_id.append(UpperCamelCase_ ) ref_ids.append(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) return ref_ids def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: UpperCamelCase = f.readlines() UpperCamelCase = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' UpperCamelCase = LTP(args.ltp ) # faster in GPU device UpperCamelCase = BertTokenizer.from_pretrained(args.bert ) UpperCamelCase = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: UpperCamelCase = [json.dumps(UpperCamelCase_ ) + """\n""" for ref in ref_ids] f.writelines(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""" ) parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""") parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""") _SCREAMING_SNAKE_CASE = parser.parse_args() main(args)
343
0
"""simple docstring""" def _snake_case ( ) -> int: '''simple docstring''' return [ a * b * (10_00 - a - b) for a in range(1 , 9_99 ) for b in range(UpperCamelCase_ , 9_99 ) if (a * a + b * b == (10_00 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'''{solution() = }''')
315
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 1 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (self.image_size, self.image_size) UpperCamelCase = (self.patch_size, self.patch_size) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxViTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) UpperCamelCase = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase_ )
343
0
from datetime import datetime as dt import os from github import Github lowerCamelCase = [ 'good first issue', 'good second issue', 'good difficult issue', 'feature request', 'new model', 'wip', ] def a_ ( ): '''simple docstring''' _lowerCamelCase : Tuple =Github(os.environ['GITHUB_TOKEN'] ) _lowerCamelCase : Union[str, Any] =g.get_repo('huggingface/transformers' ) _lowerCamelCase : Union[str, Any] =repo.get_issues(state='open' ) for issue in open_issues: _lowerCamelCase : Any =sorted([comment for comment in issue.get_comments()] , key=lambda SCREAMING_SNAKE_CASE__ : i.created_at , reverse=UpperCamelCase_ ) _lowerCamelCase : List[str] =comments[0] if len(UpperCamelCase_ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='closed' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
199
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ : def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = range_bbox def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase = bbox[i, j, 3] UpperCamelCase = bbox[i, j, 1] UpperCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase = bbox[i, j, 2] UpperCamelCase = bbox[i, j, 0] UpperCamelCase = t UpperCamelCase = None if self.use_input_mask: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __lowerCAmelCase = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ): """simple docstring""" return True def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = LiltModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ ) UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ ) UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ ) UpperCamelCase = torch.Size([1, 2, 768] ) UpperCamelCase = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
343
0
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__) def lowercase ( _snake_case : int=2 , _snake_case : Dict=3 , _snake_case : Optional[Any]=16 , _snake_case : List[str] = 10 , _snake_case : Optional[Any] = 2 ) ->str: """simple docstring""" def get_dataset(_snake_case : Optional[Any] ): __snake_case : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(UpperCamelCase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __snake_case : Tuple = get_dataset(UpperCamelCase_ ) __snake_case : Tuple = get_dataset(UpperCamelCase_ ) __snake_case : Any = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 ) __snake_case : List[str] = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowercase ( _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str , _snake_case : Dict , _snake_case : Tuple=None ) ->str: """simple docstring""" __snake_case : Dict = [] for epoch in range(UpperCamelCase_ ): # Train quickly model.train() for batch in dataloader: __snake_case , __snake_case : Any = batch __snake_case : Optional[int] = model(UpperCamelCase_ ) __snake_case : Optional[int] = torch.nn.functional.mse_loss(UpperCamelCase_ , UpperCamelCase_ ) accelerator.backward(UpperCamelCase_ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__(self ): '''simple docstring''' super().__init__() __snake_case : Union[str, Any] = nn.Parameter(torch.randn(1 ) ) __snake_case : Optional[int] = nn.Parameter(torch.randn(1 ) ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return x * self.a + self.b class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __snake_case : Optional[Any] = DummyModel() __snake_case : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __snake_case , __snake_case : Union[str, Any] = dummy_dataloaders() __snake_case : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase_ , automatic_checkpoint_naming=lowerCamelCase_ ) # Train baseline __snake_case : List[str] = Accelerator(project_config=lowerCamelCase_ ) __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __snake_case : Any = DummyModel() __snake_case : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __snake_case , __snake_case : Optional[int] = dummy_dataloaders() # Train baseline __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case : int = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save initial __snake_case : Any = os.path.join(lowerCamelCase_ , '''initial''' ) accelerator.save_state(lowerCamelCase_ ) ((__snake_case) , (__snake_case)) : List[str] = model.a.item(), model.b.item() __snake_case : List[Any] = optimizer.state_dict() __snake_case : Optional[int] = train(3 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ((__snake_case) , (__snake_case)) : Optional[Any] = model.a.item(), model.b.item() __snake_case : Tuple = optimizer.state_dict() # Train partially set_seed(42 ) __snake_case : Optional[int] = DummyModel() __snake_case : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __snake_case , __snake_case : int = dummy_dataloaders() __snake_case : Dict = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case : Dict = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) accelerator.load_state(lowerCamelCase_ ) ((__snake_case) , (__snake_case)) : Dict = model.a.item(), model.b.item() __snake_case : Any = optimizer.state_dict() self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) __snake_case : Any = train(2 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save everything __snake_case : List[Any] = os.path.join(lowerCamelCase_ , '''checkpoint''' ) accelerator.save_state(lowerCamelCase_ ) # Load everything back in and make sure all states work accelerator.load_state(lowerCamelCase_ ) test_rands += train(1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ((__snake_case) , (__snake_case)) : Dict = model.a.item(), model.b.item() __snake_case : str = optimizer.state_dict() self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __snake_case : str = DummyModel() __snake_case : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __snake_case , __snake_case : List[Any] = dummy_dataloaders() __snake_case : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase_ ) # Train baseline __snake_case : List[str] = Accelerator(project_dir=lowerCamelCase_ , project_config=lowerCamelCase_ ) __snake_case , __snake_case , __snake_case , __snake_case : Dict = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save initial accelerator.save_state() ((__snake_case) , (__snake_case)) : Optional[int] = model.a.item(), model.b.item() __snake_case : List[str] = optimizer.state_dict() __snake_case : Optional[Any] = train(3 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ((__snake_case) , (__snake_case)) : int = model.a.item(), model.b.item() __snake_case : Any = optimizer.state_dict() # Train partially set_seed(42 ) __snake_case : str = DummyModel() __snake_case : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __snake_case , __snake_case : List[Any] = dummy_dataloaders() __snake_case : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase_ ) __snake_case : str = Accelerator(project_dir=lowerCamelCase_ , project_config=lowerCamelCase_ ) __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) accelerator.load_state(os.path.join(lowerCamelCase_ , '''checkpoints''' , '''checkpoint_0''' ) ) ((__snake_case) , (__snake_case)) : Optional[int] = model.a.item(), model.b.item() __snake_case : List[Any] = optimizer.state_dict() self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) __snake_case : List[Any] = train(2 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(lowerCamelCase_ , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ((__snake_case) , (__snake_case)) : str = model.a.item(), model.b.item() __snake_case : Tuple = optimizer.state_dict() self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Union[str, Any] = torch.tensor([1, 2, 3] ) __snake_case : int = torch.tensor([2, 3, 4] ) __snake_case : List[Any] = DummyModel() __snake_case : Optional[Any] = torch.optim.Adam(net.parameters() ) __snake_case : Any = Accelerator() with self.assertRaises(lowerCamelCase_ ) as ve: accelerator.register_for_checkpointing(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) __snake_case : Optional[int] = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __snake_case : List[Any] = DummyModel() __snake_case : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __snake_case : List[str] = torch.optim.lr_scheduler.StepLR(lowerCamelCase_ , step_size=1 , gamma=0.99 ) __snake_case , __snake_case : int = dummy_dataloaders() __snake_case : str = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase_ ) # Train baseline __snake_case : Tuple = Accelerator(project_dir=lowerCamelCase_ , project_config=lowerCamelCase_ ) __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save initial accelerator.save_state() __snake_case : Tuple = scheduler.state_dict() train(3 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) self.assertNotEqual(lowerCamelCase_ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(lowerCamelCase_ , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(lowerCamelCase_ , scheduler.state_dict() ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __snake_case : int = DummyModel() __snake_case : Any = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase_ , total_limit=2 ) # Train baseline __snake_case : Optional[Any] = Accelerator(project_dir=lowerCamelCase_ , project_config=lowerCamelCase_ ) __snake_case : List[Any] = accelerator.prepare(lowerCamelCase_ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase_ , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase_ , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(lowerCamelCase_ , env=os.environ.copy() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : int = """/tmp/accelerate/state_checkpointing""" SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel() SCREAMING_SNAKE_CASE : List[str] = torch.optim.Adam(params=model.parameters(), lr=1E-3) SCREAMING_SNAKE_CASE : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dummy_dataloaders() SCREAMING_SNAKE_CASE : int = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline SCREAMING_SNAKE_CASE : Tuple = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: SCREAMING_SNAKE_CASE : Tuple = group["""params"""][0].device break assert param_device.type == accelerator.device.type SCREAMING_SNAKE_CASE : Optional[int] = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: SCREAMING_SNAKE_CASE : Optional[Any] = group["""params"""][0].device break assert ( param_device.type == torch.device("""cpu""").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: SCREAMING_SNAKE_CASE : Optional[int] = group["""params"""][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
102
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ): """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 30} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize_and_center_crop UpperCamelCase = size UpperCamelCase = crop_pct UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = PoolFormerImageProcessingTester(self ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
343
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
21
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float: '''simple docstring''' if mass < 0: raise ValueError("""The mass of a body cannot be negative""" ) return 0.5 * mass * abs(UpperCamelCase_ ) * abs(UpperCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
343
0
"""simple docstring""" def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] ) -> list[list[float]]: '''simple docstring''' __snake_case : Dict = [] for data in source_data: for i, el in enumerate(UpperCamelCase_ ): if len(UpperCamelCase_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(UpperCamelCase_ ) ) return data_lists def __UpperCAmelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ) -> list[list[float]]: '''simple docstring''' __snake_case : str = [] for dlist, weight in zip(UpperCamelCase_ , UpperCamelCase_ ): __snake_case : int = min(UpperCamelCase_ ) __snake_case : Tuple = max(UpperCamelCase_ ) __snake_case : List[str] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: __snake_case : Dict = F"Invalid weight of {weight:f} provided" raise ValueError(UpperCamelCase_ ) score_lists.append(UpperCamelCase_ ) return score_lists def __UpperCAmelCase ( UpperCAmelCase_ : Dict ) -> list[float]: '''simple docstring''' __snake_case : Optional[int] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(UpperCamelCase_ ): __snake_case : Optional[int] = final_scores[j] + ele return final_scores def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ) -> list[list[float]]: '''simple docstring''' __snake_case : Dict = get_data(UpperCamelCase_ ) __snake_case : Dict = calculate_each_score(UpperCamelCase_ , UpperCamelCase_ ) __snake_case : Tuple = generate_final_scores(UpperCamelCase_ ) # append scores to source data for i, ele in enumerate(UpperCamelCase_ ): source_data[i].append(UpperCamelCase_ ) return source_data
172
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """trocr""" __lowerCAmelCase = ["""past_key_values"""] __lowerCAmelCase = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_ffn_dim UpperCamelCase = activation_function UpperCamelCase = max_position_embeddings UpperCamelCase = dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = init_std UpperCamelCase = decoder_layerdrop UpperCamelCase = use_cache UpperCamelCase = scale_embedding UpperCamelCase = use_learned_position_embeddings UpperCamelCase = layernorm_embedding super().__init__( pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
343
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A ( __lowerCAmelCase , unittest.TestCase ): __magic_name__ = LDMTextToImagePipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS - { '''negative_prompt''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', '''prompt_embeds''', } __magic_name__ = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''callback''', '''callback_steps''', } __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = False def __lowerCAmelCase ( self ) -> str: """simple docstring""" torch.manual_seed(0 ) A : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) A : Any = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) A : Dict = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , ) torch.manual_seed(0 ) A : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A : Union[str, Any] = CLIPTextModel(lowerCamelCase_ ) A : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) A : Tuple = { '''unet''': unet, '''scheduler''': scheduler, '''vqvae''': vae, '''bert''': text_encoder, '''tokenizer''': tokenizer, } return components def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Dict: """simple docstring""" if str(lowerCamelCase_ ).startswith('''mps''' ): A : Dict = torch.manual_seed(lowerCamelCase_ ) else: A : str = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) A : Tuple = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : str = self.get_dummy_components() A : Tuple = LDMTextToImagePipeline(**lowerCamelCase_ ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) A : List[str] = self.get_dummy_inputs(lowerCamelCase_ ) A : Tuple = pipe(**lowerCamelCase_ ).images A : str = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) A : Tuple = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=torch.floataa , SCREAMING_SNAKE_CASE=0 ) -> List[Any]: """simple docstring""" A : Union[str, Any] = torch.manual_seed(lowerCamelCase_ ) A : Any = np.random.RandomState(lowerCamelCase_ ).standard_normal((1, 4, 32, 32) ) A : str = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ , dtype=lowerCamelCase_ ) A : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) A : str = self.get_inputs(lowerCamelCase_ ) A : Any = pipe(**lowerCamelCase_ ).images A : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) A : Optional[int] = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] ) A : List[Any] = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=torch.floataa , SCREAMING_SNAKE_CASE=0 ) -> int: """simple docstring""" A : Optional[Any] = torch.manual_seed(lowerCamelCase_ ) A : Union[str, Any] = np.random.RandomState(lowerCamelCase_ ).standard_normal((1, 4, 32, 32) ) A : Union[str, Any] = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ , dtype=lowerCamelCase_ ) A : str = { '''prompt''': '''A painting of a squirrel eating a burger''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 50, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) A : Any = self.get_inputs(lowerCamelCase_ ) A : Any = pipe(**lowerCamelCase_ ).images[0] A : Tuple = load_numpy( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' ) A : Dict = np.abs(expected_image - image ).max() assert max_diff < 1e-3
3
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ): __lowerCAmelCase = """swin""" __lowerCAmelCase = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )] UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names ) class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return 1E-4
343
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Union[str, Any] = { '''configuration_blenderbot_small''': [ '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotSmallConfig''', '''BlenderbotSmallOnnxConfig''', ], '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = ['''BlenderbotSmallTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = [ '''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotSmallForCausalLM''', '''BlenderbotSmallForConditionalGeneration''', '''BlenderbotSmallModel''', '''BlenderbotSmallPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : int = [ '''TFBlenderbotSmallForConditionalGeneration''', '''TFBlenderbotSmallModel''', '''TFBlenderbotSmallPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Union[str, Any] = [ '''FlaxBlenderbotSmallForConditionalGeneration''', '''FlaxBlenderbotSmallModel''', '''FlaxBlenderbotSmallPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __lowercase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
318
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _SCREAMING_SNAKE_CASE = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_2_8, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5) _SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) _SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img( """dataset/single_prediction/image.png""", target_size=(6_4, 6_4) ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image) _SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0) _SCREAMING_SNAKE_CASE = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _SCREAMING_SNAKE_CASE = """Normal""" if result[0][0] == 1: _SCREAMING_SNAKE_CASE = """Abnormality detected"""
343
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging A : str = logging.get_logger(__name__) A : Dict = {'vocab_file': 'spiece.model'} A : int = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } A : int = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) A : Optional[int] = 0 A : Union[str, Any] = 1 A : Tuple = 2 A : Dict = 3 A : str = 4 class __A( __lowerCAmelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = '''left''' def __init__( self , _snake_case , _snake_case=False , _snake_case=True , _snake_case=False , _snake_case="<s>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<sep>" , _snake_case="<pad>" , _snake_case="<cls>" , _snake_case="<mask>" , _snake_case=["<eop>", "<eod>"] , _snake_case = None , **_snake_case , ) -> Tuple: '''simple docstring''' __a = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token __a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) __a = 3 __a = do_lower_case __a = remove_space __a = keep_accents __a = vocab_file __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase_ ) @property def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return len(self.sp_model ) def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[str]: '''simple docstring''' __a = self.__dict__.copy() __a = None return state def __setstate__( self , _snake_case ) -> Dict: '''simple docstring''' __a = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __a = {} __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' if self.remove_space: __a = ''' '''.join(inputs.strip().split() ) else: __a = inputs __a = outputs.replace('''``''' , '''\"''' ).replace('''\'\'''' , '''\"''' ) if not self.keep_accents: __a = unicodedata.normalize('''NFKD''' , lowerCamelCase_ ) __a = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] ) if self.do_lower_case: __a = outputs.lower() return outputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' __a = self.preprocess_text(lowerCamelCase_ ) __a = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) __a = [] for piece in pieces: if len(lowerCamelCase_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): __a = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __a = cur_pieces[1:] else: __a = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowerCamelCase_ ) else: new_pieces.append(lowerCamelCase_ ) return new_pieces def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' return self.sp_model.PieceToId(lowerCamelCase_ ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' return self.sp_model.IdToPiece(lowerCamelCase_ ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' __a = ''''''.join(lowerCamelCase_ ).replace(lowerCamelCase_ , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = False , _snake_case = None , _snake_case = True , **_snake_case , ) -> Optional[Any]: '''simple docstring''' __a = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_ ) __a = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 __a = [] __a = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) ) __a = [] sub_texts.append(lowerCamelCase_ ) else: current_sub_text.append(lowerCamelCase_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens __a = ''''''.join(lowerCamelCase_ ) __a = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __a = self.clean_up_tokenization(lowerCamelCase_ ) return clean_text else: return text def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> int: '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> Optional[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is not None: return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] return ([0] * len(lowerCamelCase_ )) + [1, 1] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> int: '''simple docstring''' __a = [self.sep_token_id] __a = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[Any]: '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __a = os.path.join( lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , '''wb''' ) as fi: __a = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
6
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): pass class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = data UpperCamelCase = None def __iter__( self : Optional[int] ): """simple docstring""" UpperCamelCase = self UpperCamelCase = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCamelCase_ ) yield node.data UpperCamelCase = node.next_node @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": _SCREAMING_SNAKE_CASE = Node(1) _SCREAMING_SNAKE_CASE = Node(2) _SCREAMING_SNAKE_CASE = Node(3) _SCREAMING_SNAKE_CASE = Node(4) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = root_node.next_node print(root_node.has_loop) # True _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = Node(1) print(root_node.has_loop) # False
343
0
'''simple docstring''' from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class __SCREAMING_SNAKE_CASE : """simple docstring""" pass
63
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , ) assert hasattr(self , """env""" ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = { """enabled""": True, """processes_per_host""": 8, } UpperCamelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
343
0
'''simple docstring''' import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset _SCREAMING_SNAKE_CASE = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) _SCREAMING_SNAKE_CASE = dataset.iloc[:, 1:2].values _SCREAMING_SNAKE_CASE = dataset.iloc[:, 2].values _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = train_test_split(X, y, test_size=0.2, random_state=0) _SCREAMING_SNAKE_CASE = PolynomialFeatures(degree=4) _SCREAMING_SNAKE_CASE = poly_reg.fit_transform(X) _SCREAMING_SNAKE_CASE = LinearRegression() pol_reg.fit(X_poly, y) def __a(): '''simple docstring''' plt.scatter(UpperCamelCase_ , UpperCamelCase_ , color="red" ) plt.plot(UpperCamelCase_ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase_ ) ) , color="blue" ) plt.title("Truth or Bluff (Linear Regression)" ) plt.xlabel("Position level" ) plt.ylabel("Salary" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
158
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
343
0
"""simple docstring""" import torch from diffusers import DiffusionPipeline class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): def __init__( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict): """simple docstring""" super().__init__() self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_) def __call__( self : Tuple): """simple docstring""" lowercase_ = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) lowercase_ = 1 lowercase_ = self.unet(lowerCamelCase_ , lowerCamelCase_).sample lowercase_ = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_).prev_sample lowercase_ = scheduler_output - scheduler_output + torch.ones_like(lowerCamelCase_) return result
136
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ShapEPipeline __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __lowerCAmelCase = False @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return 8 @property def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(lowerCamelCase_ ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCamelCase = PriorTransformer(**lowerCamelCase_ ) return model @property def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**lowerCamelCase_ ) return model def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , ) UpperCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ): """simple docstring""" if str(lowerCamelCase_ ).startswith("""mps""" ): UpperCamelCase = torch.manual_seed(lowerCamelCase_ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) UpperCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = """cpu""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self : Tuple ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch_device == """cpu""" UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCamelCase = pipe( """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
343
0
"""simple docstring""" def _snake_case ( _snake_case : Union[str, Any] ) -> List[str]: '''simple docstring''' _A = [0] * len(UpperCamelCase_ ) _A = [] _A = [] _A = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(UpperCamelCase_ ) ): if indegree[i] == 0: queue.append(UpperCamelCase_ ) while queue: _A = queue.pop(0 ) cnt += 1 topo.append(UpperCamelCase_ ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(UpperCamelCase_ ) if cnt != len(UpperCamelCase_ ): print('Cycle exists' ) else: print(UpperCamelCase_ ) # Adjacency List of Graph a = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
315
from __future__ import annotations def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> list: '''simple docstring''' UpperCamelCase = [] UpperCamelCase , UpperCamelCase = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) UpperCamelCase = result + left + right return input_list def lowercase( UpperCamelCase_ ) -> list: '''simple docstring''' if len(UpperCamelCase_ ) <= 1: return input_list UpperCamelCase = list(UpperCamelCase_ ) # iteration for two-way merging UpperCamelCase = 2 while p <= len(UpperCamelCase_ ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = i + p - 1 UpperCamelCase = (low + high + 1) // 2 UpperCamelCase = merge(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # final merge of last two parts if p * 2 >= len(UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = merge(UpperCamelCase_ , 0 , UpperCamelCase_ , len(UpperCamelCase_ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip() if user_input == "": _SCREAMING_SNAKE_CASE = [] else: _SCREAMING_SNAKE_CASE = [int(item.strip()) for item in user_input.split(""",""")] print(iter_merge_sort(unsorted))
343
0
def a_ ( SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' _lowerCamelCase : List[Any] =[0] * len(UpperCamelCase_ ) for i in range(1 , len(UpperCamelCase_ ) ): # use last results for better performance - dynamic programming _lowerCamelCase : Optional[int] =prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: _lowerCamelCase : Union[str, Any] =prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 _lowerCamelCase : Optional[int] =j return prefix_result def a_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' return max(prefix_function(UpperCamelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod()
199
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = num_groups def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = BitModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = BitForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowerCAmelCase = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""Bit does not output attentions""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(config=lowerCamelCase_ ) for name, module in model.named_modules(): if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def lowerCamelCase_ ( self : int ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): UpperCamelCase = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : int ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitBackbone,) if is_torch_available() else () __lowerCAmelCase = BitConfig __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self )
343
0
"""simple docstring""" import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( """The `inpainting.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionInpaintPipeline` instead.""" )
102
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = TFResNetModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __lowerCAmelCase = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = TFResNetModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ): UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Any ): """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # forward pass UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
343
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) class _lowerCamelCase( __lowerCAmelCase ): lowercase_ : Union[str, Any] = ["""pixel_values"""] def __init__( self, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = PIL.Image.BICUBIC, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = 1 / 2_55, lowerCamelCase = True, lowerCamelCase = True, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> Tuple: """simple docstring""" super().__init__(**lowerCamelCase_) _lowercase : Tuple = size if size is not None else {'height': 2_56, 'width': 2_56} _lowercase : int = get_size_dict(lowerCamelCase_) _lowercase : int = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} _lowercase : str = get_size_dict(lowerCamelCase_, param_name='crop_size') _lowercase : Tuple = do_resize _lowercase : List[Any] = size _lowercase : str = resample _lowercase : Any = do_center_crop _lowercase : Optional[Any] = crop_size _lowercase : List[Any] = do_rescale _lowercase : Dict = rescale_factor _lowercase : Optional[int] = do_normalize _lowercase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowercase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = PIL.Image.BICUBIC, lowerCamelCase = None, **lowerCamelCase, ) -> Union[str, Any]: """simple docstring""" _lowercase : Tuple = get_size_dict(lowerCamelCase_) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''') return resize( lowerCamelCase_, size=(size['height'], size['width']), resample=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> Any: """simple docstring""" _lowercase : Tuple = get_size_dict(lowerCamelCase_) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''') return center_crop(lowerCamelCase_, size=(size['height'], size['width']), data_format=lowerCamelCase_, **lowerCamelCase_) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> List[Any]: """simple docstring""" return rescale(lowerCamelCase_, scale=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, **lowerCamelCase, ) -> List[Any]: """simple docstring""" return normalize(lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_, data_format=lowerCamelCase_, **lowerCamelCase_) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = ChannelDimension.FIRST, **lowerCamelCase, ) -> Any: """simple docstring""" _lowercase : Any = do_resize if do_resize is not None else self.do_resize _lowercase : Dict = resample if resample is not None else self.resample _lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase : Dict = do_rescale if do_rescale is not None else self.do_rescale _lowercase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : Any = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Optional[Any] = image_mean if image_mean is not None else self.image_mean _lowercase : Optional[int] = image_std if image_std is not None else self.image_std _lowercase : str = size if size is not None else self.size _lowercase : Dict = get_size_dict(lowerCamelCase_) _lowercase : List[str] = crop_size if crop_size is not None else self.crop_size _lowercase : Union[str, Any] = get_size_dict(lowerCamelCase_, param_name='crop_size') _lowercase : Optional[int] = make_list_of_images(lowerCamelCase_) if not valid_images(lowerCamelCase_): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. _lowercase : str = [to_numpy_array(lowerCamelCase_) for image in images] if do_resize: _lowercase : Optional[int] = [self.resize(image=lowerCamelCase_, size=lowerCamelCase_, resample=lowerCamelCase_) for image in images] if do_center_crop: _lowercase : Union[str, Any] = [self.center_crop(image=lowerCamelCase_, size=lowerCamelCase_) for image in images] if do_rescale: _lowercase : List[Any] = [self.rescale(image=lowerCamelCase_, scale=lowerCamelCase_) for image in images] if do_normalize: _lowercase : List[Any] = [self.normalize(image=lowerCamelCase_, mean=lowerCamelCase_, std=lowerCamelCase_) for image in images] _lowercase : List[str] = [to_channel_dimension_format(lowerCamelCase_, lowerCamelCase_) for image in images] _lowercase : Optional[int] = {'pixel_values': images} return BatchFeature(data=lowerCamelCase_, tensor_type=lowerCamelCase_)
21
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val def lowercase( UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]: '''simple docstring''' UpperCamelCase = """""" if is_panoptic: UpperCamelCase = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCamelCase = """resnet101""" if "dc5" in model_name: UpperCamelCase = True UpperCamelCase = """panoptic""" in model_name if is_panoptic: UpperCamelCase = 250 else: UpperCamelCase = 91 UpperCamelCase = """huggingface/label-files""" UpperCamelCase = """coco-detection-id2label.json""" UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} # load image processor UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection""" UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ ) # prepare image UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) UpperCamelCase = encoding["""pixel_values"""] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval() UpperCamelCase = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCamelCase = """conditional_detr.""" + src rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val # finally, create HuggingFace model and load state dict UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion UpperCamelCase = conditional_detr(UpperCamelCase_ ) UpperCamelCase = model(UpperCamelCase_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
343
0
"""simple docstring""" import baseaa def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] ) -> bytes: '''simple docstring''' return baseaa.aaaencode(string.encode('utf-8' ) ) def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] ) -> str: '''simple docstring''' return baseaa.aaadecode(UpperCamelCase_ ).decode('utf-8' ) if __name__ == "__main__": import doctest doctest.testmod()
172
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = mask_ratio UpperCamelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) # expected sequence length = num_patches UpperCamelCase = (self.image_size // self.patch_size) ** 2 UpperCamelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) UpperCamelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self : str ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = outputs_dict[0].numpy() UpperCamelCase = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ): UpperCamelCase = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCamelCase_ ): UpperCamelCase = v.numpy() else: UpperCamelCase = np.array(lowerCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.constant(lowerCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase = tf_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCamelCase_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ ) } UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: UpperCamelCase = main_layer_class(lowerCamelCase_ ) UpperCamelCase = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) ) UpperCamelCase = model(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" ) model.save(lowerCamelCase_ ) UpperCamelCase = tf.keras.models.load_model( lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCamelCase_ , tf.keras.Model ) UpperCamelCase = model(lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = outputs.last_hidden_state.numpy() UpperCamelCase = 0 else: UpperCamelCase = outputs.logits.numpy() UpperCamelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ ) UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = after_outputs["""last_hidden_state"""].numpy() UpperCamelCase = 0 else: UpperCamelCase = after_outputs["""logits"""].numpy() UpperCamelCase = 0 UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCamelCase_ ) UpperCamelCase = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCamelCase = model_class.from_config(model.config ) UpperCamelCase = new_model(lowerCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> int: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase = ViTMAEConfig() UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
343
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase : int = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Any = DPTConfig() if "large" in checkpoint_url: A : str = 1024 A : Tuple = 4096 A : Union[str, Any] = 24 A : List[str] = 16 A : List[Any] = [5, 11, 17, 23] A : Optional[Any] = [256, 512, 1024, 1024] A : str = (1, 384, 384) if "ade" in checkpoint_url: A : Optional[int] = True A : Optional[int] = 150 A : Optional[Any] = '''huggingface/label-files''' A : Any = '''ade20k-id2label.json''' A : Any = json.load(open(cached_download(hf_hub_url(UpperCamelCase_ , UpperCamelCase_ , repo_type='''dataset''' ) ) , '''r''' ) ) A : List[Any] = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Any = {v: k for k, v in idalabel.items()} A : int = [1, 150, 480, 480] return config, expected_shape def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Tuple = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): A : Any = name.replace('''pretrained.model''' , '''dpt.encoder''' ) if "pretrained.model" in name: A : Optional[Any] = name.replace('''pretrained.model''' , '''dpt.embeddings''' ) if "patch_embed" in name: A : int = name.replace('''patch_embed''' , '''patch_embeddings''' ) if "pos_embed" in name: A : Optional[int] = name.replace('''pos_embed''' , '''position_embeddings''' ) if "attn.proj" in name: A : str = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "proj" in name and "project" not in name: A : List[Any] = name.replace('''proj''' , '''projection''' ) if "blocks" in name: A : Any = name.replace('''blocks''' , '''layer''' ) if "mlp.fc1" in name: A : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' ) if "norm1" in name: A : Optional[int] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : str = name.replace('''norm2''' , '''layernorm_after''' ) if "scratch.output_conv" in name: A : int = name.replace('''scratch.output_conv''' , '''head''' ) if "scratch" in name: A : Optional[Any] = name.replace('''scratch''' , '''neck''' ) if "layer1_rn" in name: A : Any = name.replace('''layer1_rn''' , '''convs.0''' ) if "layer2_rn" in name: A : int = name.replace('''layer2_rn''' , '''convs.1''' ) if "layer3_rn" in name: A : Dict = name.replace('''layer3_rn''' , '''convs.2''' ) if "layer4_rn" in name: A : List[Any] = name.replace('''layer4_rn''' , '''convs.3''' ) if "refinenet" in name: A : Optional[Any] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 A : Optional[int] = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: A : Union[str, Any] = name.replace('''out_conv''' , '''projection''' ) if "resConfUnit1" in name: A : List[str] = name.replace('''resConfUnit1''' , '''residual_layer1''' ) if "resConfUnit2" in name: A : int = name.replace('''resConfUnit2''' , '''residual_layer2''' ) if "conv1" in name: A : Any = name.replace('''conv1''' , '''convolution1''' ) if "conv2" in name: A : List[Any] = name.replace('''conv2''' , '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: A : Optional[Any] = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: A : Optional[Any] = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: A : Tuple = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: A : int = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: A : Any = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: A : int = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: A : Optional[int] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: A : str = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: A : List[str] = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: A : str = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: A : str = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: A : Optional[Any] = name.replace('''pretrained''' , '''dpt''' ) if "bn" in name: A : Union[str, Any] = name.replace('''bn''' , '''batch_norm''' ) if "head" in name: A : Tuple = name.replace('''head''' , '''head.head''' ) if "encoder.norm" in name: A : Union[str, Any] = name.replace('''encoder.norm''' , '''layernorm''' ) if "auxlayer" in name: A : int = name.replace('''auxlayer''' , '''auxiliary_head.head''' ) return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A : Optional[int] = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' ) A : str = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A : Tuple = in_proj_weight[: config.hidden_size, :] A : Dict = in_proj_bias[: config.hidden_size] A : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A : Any = in_proj_weight[ -config.hidden_size :, : ] A : Any = in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( ): '''simple docstring''' A : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : List[str] = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A, A : int = get_dpt_config(UpperCamelCase_ ) # load original state_dict from URL A : int = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(UpperCamelCase_ ) # rename keys for key in state_dict.copy().keys(): A : Dict = state_dict.pop(UpperCamelCase_ ) A : Tuple = val # read in qkv matrices read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ ) # load HuggingFace model A : Optional[int] = DPTForSemanticSegmentation(UpperCamelCase_ ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() # Check outputs on an image A : Union[str, Any] = 480 if '''ade''' in checkpoint_url else 384 A : Dict = DPTImageProcessor(size=UpperCamelCase_ ) A : Union[str, Any] = prepare_img() A : Tuple = image_processor(UpperCamelCase_ , return_tensors='''pt''' ) # forward pass A : str = model(**UpperCamelCase_ ).logits if '''ade''' in checkpoint_url else model(**UpperCamelCase_ ).predicted_depth # Assert logits A : str = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] ) if "ade" in checkpoint_url: A : Any = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] ) assert outputs.shape == torch.Size(UpperCamelCase_ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , UpperCamelCase_ ) ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCamelCase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCamelCase_ ) if push_to_hub: print('''Pushing model to hub...''' ) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase_ , UpperCamelCase_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCamelCase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase_ , UpperCamelCase_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase_ , ) if __name__ == "__main__": lowercase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) lowercase : Dict = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
3
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool: '''simple docstring''' return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool: '''simple docstring''' # Base Case if index == len(UpperCamelCase_ ): return True # Recursive Step for i in range(UpperCamelCase_ ): if valid_coloring(graph[index] , UpperCamelCase_ , UpperCamelCase_ ): # Color current vertex UpperCamelCase = i # Validate coloring if util_color(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index + 1 ): return True # Backtrack UpperCamelCase = -1 return False def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> list[int]: '''simple docstring''' UpperCamelCase = [-1] * len(UpperCamelCase_ ) if util_color(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , 0 ): return colored_vertices return []
343
0
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict: '''simple docstring''' lowerCamelCase_ : Union[str, Any] = BertConfig.from_json_file(UpperCamelCase_ ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCamelCase_ : int = BertForPreTraining(UpperCamelCase_ ) # Load weights from tf checkpoint load_tf_weights_in_bert(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , UpperCamelCase_ ) if __name__ == "__main__": __lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __lowercase : int = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
318
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
343
0
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class __A( datasets.BuilderConfig ): snake_case_ = None class __A( datasets.ArrowBasedBuilder ): snake_case_ = PandasConfig def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple: '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __a = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCamelCase_ , (str, list, tuple) ): __a = data_files if isinstance(lowerCamelCase_ , lowerCamelCase_ ): __a = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __a = [dl_manager.iter_files(lowerCamelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __a = [] for split_name, files in data_files.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): __a = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __a = [dl_manager.iter_files(lowerCamelCase_ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCamelCase_ , gen_kwargs={'''files''': files} ) ) return splits def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __a = table_cast(lowerCamelCase_ , self.config.features.arrow_schema ) return pa_table def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict: '''simple docstring''' for i, file in enumerate(itertools.chain.from_iterable(lowerCamelCase_ ) ): with open(lowerCamelCase_ , '''rb''' ) as f: __a = pa.Table.from_pandas(pd.read_pickle(lowerCamelCase_ ) ) yield i, self._cast_table(lowerCamelCase_ )
6
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _SCREAMING_SNAKE_CASE = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", False), ("""AS 3S 4S 8S 2S""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]), ("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 2_3), ("""JH 9H TH KH QH""", 2_2), ("""JC KH JS JD JH""", 2_1), ("""KH KC 3S 3H 3D""", 2_0), ("""8C 9C 5C 3C TC""", 1_9), ("""JS QS 9H TS KH""", 1_8), ("""7C 7S KH 2H 7H""", 1_7), ("""3C KH 5D 5S KH""", 1_6), ("""QH 8H KD JH 8S""", 1_5), ("""2D 6D 9D TH 7D""", 1_4), ) def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) ) UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase( UpperCamelCase_ = 100 ) -> List[Any]: '''simple docstring''' return (generate_random_hand() for _ in range(UpperCamelCase_ )) @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: '''simple docstring''' UpperCamelCase = PokerHand(UpperCamelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(UpperCamelCase_ ) UpperCamelCase = chain(sorted(UpperCamelCase_ ) ) for index, hand in enumerate(UpperCamelCase_ ): assert hand == poker_hands[index] def lowercase( ) -> Union[str, Any]: '''simple docstring''' # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=UpperCamelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase( ) -> str: '''simple docstring''' # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase( ) -> int: '''simple docstring''' # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" ) with open(UpperCamelCase_ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ ) UpperCamelCase = player.compare_with(UpperCamelCase_ ) if output == "Win": answer += 1 assert answer == 376
343
0
'''simple docstring''' def _lowerCamelCase ( lowercase : int , lowercase : str ) -> int: return number | (1 << position) def _lowerCamelCase ( lowercase : int , lowercase : Any ) -> int: return number & ~(1 << position) def _lowerCamelCase ( lowercase : Tuple , lowercase : int ) -> int: return number ^ (1 << position) def _lowerCamelCase ( lowercase : int , lowercase : Union[str, Any] ) -> bool: return ((number >> position) & 1) == 1 def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Dict ) -> int: return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
63
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """xlnet""" __lowerCAmelCase = ["""mems"""] __lowerCAmelCase = { """n_token""": """vocab_size""", # Backward compatibility """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , lowerCamelCase_ : Any=3_2000 , lowerCamelCase_ : Dict=1024 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : List[Any]=4096 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Union[str, Any]="bi" , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=1E-12 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Union[str, Any]=512 , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Union[str, Any]="last" , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : str="tanh" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Dict=5 , lowerCamelCase_ : str=5 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : int=2 , **lowerCamelCase_ : List[Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = n_layer UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) UpperCamelCase = d_model // n_head UpperCamelCase = ff_activation UpperCamelCase = d_inner UpperCamelCase = untie_r UpperCamelCase = attn_type UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = dropout UpperCamelCase = mem_len UpperCamelCase = reuse_len UpperCamelCase = bi_data UpperCamelCase = clamp_len UpperCamelCase = same_length UpperCamelCase = summary_type UpperCamelCase = summary_use_proj UpperCamelCase = summary_activation UpperCamelCase = summary_last_dropout UpperCamelCase = start_n_top UpperCamelCase = end_n_top UpperCamelCase = bos_token_id UpperCamelCase = pad_token_id UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`""" """ instead.""" , lowerCamelCase_ , ) UpperCamelCase = kwargs["""use_cache"""] UpperCamelCase = use_mems_eval UpperCamelCase = use_mems_train super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
343
0
'''simple docstring''' import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "vocab_file": "vocab.txt", "merges_file": "bpe.codes", } _SCREAMING_SNAKE_CASE = { "vocab_file": { "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt", "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt", }, "merges_file": { "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes", "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes", }, } _SCREAMING_SNAKE_CASE = { "vinai/phobert-base": 2_56, "vinai/phobert-large": 2_56, } def __a(SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' _lowerCAmelCase = set() _lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowerCAmelCase = char _lowerCAmelCase = set(UpperCamelCase_ ) return pairs class lowerCAmelCase_ ( __lowerCAmelCase ): __lowerCamelCase : List[str] = VOCAB_FILES_NAMES __lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , **_lowerCAmelCase , ) -> Tuple: super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , ) _lowerCAmelCase = vocab_file _lowerCAmelCase = merges_file _lowerCAmelCase = {} _lowerCAmelCase = 0 _lowerCAmelCase = 1 _lowerCAmelCase = 2 _lowerCAmelCase = 3 self.add_from_file(lowerCamelCase_ ) _lowerCAmelCase = {v: k for k, v in self.encoder.items()} with open(lowerCamelCase_ , encoding="utf-8" ) as merges_handle: _lowerCAmelCase = merges_handle.read().split("\n" )[:-1] _lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges] _lowerCAmelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) _lowerCAmelCase = {} def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> int: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] _lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> Tuple: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Union[str, Any]: _lowerCAmelCase = [self.sep_token_id] _lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self ) -> Tuple: return len(self.encoder ) def _snake_case ( self ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self , _lowerCAmelCase ) -> int: if token in self.cache: return self.cache[token] _lowerCAmelCase = tuple(lowerCamelCase_ ) _lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) _lowerCAmelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: _lowerCAmelCase = min(lowerCamelCase_ , key=lambda _lowerCAmelCase : self.bpe_ranks.get(lowerCamelCase_ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break _lowerCAmelCase , _lowerCAmelCase = bigram _lowerCAmelCase = [] _lowerCAmelCase = 0 while i < len(lowerCamelCase_ ): try: _lowerCAmelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _lowerCAmelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _lowerCAmelCase = tuple(lowerCamelCase_ ) _lowerCAmelCase = new_word if len(lowerCamelCase_ ) == 1: break else: _lowerCAmelCase = get_pairs(lowerCamelCase_ ) _lowerCAmelCase = "@@ ".join(lowerCamelCase_ ) _lowerCAmelCase = word[:-4] _lowerCAmelCase = word return word def _snake_case ( self , _lowerCAmelCase ) -> List[str]: _lowerCAmelCase = [] _lowerCAmelCase = re.findall(r"\S+\n?" , lowerCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(" " ) ) ) return split_tokens def _snake_case ( self , _lowerCAmelCase ) -> Any: return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self , _lowerCAmelCase ) -> Tuple: return self.decoder.get(lowerCamelCase_ , self.unk_token ) def _snake_case ( self , _lowerCAmelCase ) -> Any: _lowerCAmelCase = " ".join(lowerCamelCase_ ).replace("@@ " , "" ).strip() return out_string def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Union[str, Any]: if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase = os.path.join( lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) _lowerCAmelCase = os.path.join( lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ): copyfile(self.vocab_file , lowerCamelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCamelCase_ ): copyfile(self.merges_file , lowerCamelCase_ ) return out_vocab_file, out_merge_file def _snake_case ( self , _lowerCAmelCase ) -> str: if isinstance(lowerCamelCase_ , lowerCamelCase_ ): try: with open(lowerCamelCase_ , "r" , encoding="utf-8" ) as fd: self.add_from_file(lowerCamelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' ) return _lowerCAmelCase = f.readlines() for lineTmp in lines: _lowerCAmelCase = lineTmp.strip() _lowerCAmelCase = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) _lowerCAmelCase = line[:idx] _lowerCAmelCase = len(self.encoder )
158
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 0 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : Any ): """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = True try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
343
0
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup UpperCAmelCase : Tuple = "https://www.indeed.co.in/jobs?q=mobile+app+development&l=" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "mumbai" ) -> Generator[tuple[str, str], None, None]: '''simple docstring''' lowercase_ = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): lowercase_ = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() lowercase_ = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("Bangalore"), 1): print(F"Job {i:>2} is {job[0]} at {job[1]}")
136
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X2_0000 and cp <= 0X2_A6DF) # or (cp >= 0X2_A700 and cp <= 0X2_B73F) # or (cp >= 0X2_B740 and cp <= 0X2_B81F) # or (cp >= 0X2_B820 and cp <= 0X2_CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2_F800 and cp <= 0X2_FA1F) # ): # return True return False def lowercase( UpperCamelCase_ ) -> Dict: '''simple docstring''' # word like '180' or '身高' or '神' for char in word: UpperCamelCase = ord(UpperCamelCase_ ) if not _is_chinese_char(UpperCamelCase_ ): return 0 return 1 def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' UpperCamelCase = set() for token in tokens: UpperCamelCase = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ ) if chinese_word: word_set.add(UpperCamelCase_ ) UpperCamelCase = list(UpperCamelCase_ ) return word_list def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' if not chinese_word_set: return bert_tokens UpperCamelCase = max([len(UpperCamelCase_ ) for w in chinese_word_set] ) UpperCamelCase = bert_tokens UpperCamelCase , UpperCamelCase = 0, len(UpperCamelCase_ ) while start < end: UpperCamelCase = True if is_chinese(bert_word[start] ): UpperCamelCase = min(end - start , UpperCamelCase_ ) for i in range(UpperCamelCase_ , 1 , -1 ): UpperCamelCase = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): UpperCamelCase = """##""" + bert_word[j] UpperCamelCase = start + i UpperCamelCase = False break if single_word: start += 1 return bert_word def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str: '''simple docstring''' UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0] UpperCamelCase = [get_chinese_word(UpperCamelCase_ ) for r in res] ltp_res.extend(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase = [] for id in input_ids: UpperCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase_ ) input_tokens.append(UpperCamelCase_ ) UpperCamelCase = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(UpperCamelCase_ ): if token[:2] == "##": UpperCamelCase = token[2:] # save chinese tokens' pos if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ): ref_id.append(UpperCamelCase_ ) ref_ids.append(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) return ref_ids def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: UpperCamelCase = f.readlines() UpperCamelCase = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' UpperCamelCase = LTP(args.ltp ) # faster in GPU device UpperCamelCase = BertTokenizer.from_pretrained(args.bert ) UpperCamelCase = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: UpperCamelCase = [json.dumps(UpperCamelCase_ ) + """\n""" for ref in ref_ids] f.writelines(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""" ) parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""") parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""") _SCREAMING_SNAKE_CASE = parser.parse_args() main(args)
343
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer a = logging.get_logger(__name__) a = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } a = { '''bert-base-uncased''': 512, '''bert-large-uncased''': 512, '''bert-base-cased''': 512, '''bert-large-cased''': 512, '''bert-base-multilingual-uncased''': 512, '''bert-base-multilingual-cased''': 512, '''bert-base-chinese''': 512, '''bert-base-german-cased''': 512, '''bert-large-uncased-whole-word-masking''': 512, '''bert-large-cased-whole-word-masking''': 512, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 512, '''bert-large-cased-whole-word-masking-finetuned-squad''': 512, '''bert-base-cased-finetuned-mrpc''': 512, '''bert-base-german-dbmdz-cased''': 512, '''bert-base-german-dbmdz-uncased''': 512, '''TurkuNLP/bert-base-finnish-cased-v1''': 512, '''TurkuNLP/bert-base-finnish-uncased-v1''': 512, '''wietsedv/bert-base-dutch-cased''': 512, } a = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False}, } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[Any] = BertTokenizer def __init__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="[UNK]" , _UpperCAmelCase : int="[SEP]" , _UpperCAmelCase : Optional[int]="[PAD]" , _UpperCAmelCase : Tuple="[CLS]" , _UpperCAmelCase : Any="[MASK]" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : int=None , **_UpperCAmelCase : Dict , ): super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , ) _A = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , lowerCamelCase_ ) != do_lower_case or normalizer_state.get('strip_accents' , lowerCamelCase_ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , lowerCamelCase_ ) != tokenize_chinese_chars ): _A = getattr(lowerCamelCase_ , normalizer_state.pop('type' ) ) _A = do_lower_case _A = strip_accents _A = tokenize_chinese_chars _A = normalizer_class(**lowerCamelCase_ ) _A = do_lower_case def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=None ): _A = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): _A = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ ) return tuple(lowerCamelCase_ )
315
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 1 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (self.image_size, self.image_size) UpperCamelCase = (self.patch_size, self.patch_size) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxViTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) UpperCamelCase = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase_ )
343
0
lowerCamelCase = {str(digit): digit**5 for digit in range(10)} def a_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase_ ) ) def a_ ( ): '''simple docstring''' return sum( number for number in range(1_000 , 1_000_000 ) if number == digits_fifth_powers_sum(UpperCamelCase_ ) ) if __name__ == "__main__": print(solution())
199
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ : def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = range_bbox def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase = bbox[i, j, 3] UpperCamelCase = bbox[i, j, 1] UpperCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase = bbox[i, j, 2] UpperCamelCase = bbox[i, j, 0] UpperCamelCase = t UpperCamelCase = None if self.use_input_mask: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __lowerCAmelCase = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ): """simple docstring""" return True def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = LiltModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ ) UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ ) UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ ) UpperCamelCase = torch.Size([1, 2, 768] ) UpperCamelCase = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
343
0
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowercase ( _snake_case : Tuple ) ->Tuple: """simple docstring""" __snake_case : Any = filter(lambda _snake_case : p.requires_grad , model.parameters() ) __snake_case : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__) def lowercase ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ) ->Optional[Any]: """simple docstring""" if metric == "rouge2": __snake_case : Optional[Any] = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": __snake_case : Dict = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": __snake_case : Union[str, Any] = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''' ) __snake_case : List[Any] = ModelCheckpoint( dirpath=UpperCamelCase_ , filename=UpperCamelCase_ , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def lowercase ( _snake_case : List[str] , _snake_case : str ) ->str: """simple docstring""" return EarlyStopping( monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=UpperCamelCase_ , verbose=UpperCamelCase_ , ) class _UpperCAmelCase ( pl.Callback ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' __snake_case : str = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowerCamelCase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_=True ): '''simple docstring''' logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) __snake_case : Optional[int] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results __snake_case : Dict = Path(pl_module.hparams.output_dir ) if type_path == "test": __snake_case : Any = od / '''test_results.txt''' __snake_case : List[Any] = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. __snake_case : List[str] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" __snake_case : int = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=lowerCamelCase_ ) generations_file.parent.mkdir(exist_ok=lowerCamelCase_ ) with open(lowerCamelCase_ , '''a+''' ) as writer: for key in sorted(lowerCamelCase_ ): if key in ["log", "progress_bar", "preds"]: continue __snake_case : List[str] = metrics[key] if isinstance(lowerCamelCase_ , torch.Tensor ): __snake_case : List[str] = val.item() __snake_case : Optional[int] = f"""{key}: {val:.6f}\n""" writer.write(lowerCamelCase_ ) if not save_generations: return if "preds" in metrics: __snake_case : List[str] = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(lowerCamelCase_ ) @rank_zero_only def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' try: __snake_case : List[Any] = pl_module.model.model.num_parameters() except AttributeError: __snake_case : List[str] = pl_module.model.num_parameters() __snake_case : List[str] = count_trainable_parameters(lowerCamelCase_ ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} ) @rank_zero_only def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowerCamelCase_ , lowerCamelCase_ , '''test''' ) @rank_zero_only def SCREAMING_SNAKE_CASE (self , a_ , a_ ): '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
102
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ): """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 30} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize_and_center_crop UpperCamelCase = size UpperCamelCase = crop_pct UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = PoolFormerImageProcessingTester(self ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
343
0
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[str] = { "microsoft/trocr-base-handwritten": ( "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class _lowerCamelCase( __lowerCAmelCase ): lowercase_ : Dict = """trocr""" lowercase_ : int = ["""past_key_values"""] lowercase_ : int = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self, lowerCamelCase=5_02_65, lowerCamelCase=10_24, lowerCamelCase=12, lowerCamelCase=16, lowerCamelCase=40_96, lowerCamelCase="gelu", lowerCamelCase=5_12, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=0, lowerCamelCase=2, **lowerCamelCase, ) -> Dict: """simple docstring""" _lowercase : List[str] = vocab_size _lowercase : Optional[Any] = d_model _lowercase : Any = decoder_layers _lowercase : List[Any] = decoder_attention_heads _lowercase : Optional[int] = decoder_ffn_dim _lowercase : List[Any] = activation_function _lowercase : Any = max_position_embeddings _lowercase : List[str] = dropout _lowercase : int = attention_dropout _lowercase : List[str] = activation_dropout _lowercase : Optional[int] = init_std _lowercase : Union[str, Any] = decoder_layerdrop _lowercase : Optional[int] = use_cache _lowercase : List[Any] = scale_embedding _lowercase : Optional[int] = use_learned_position_embeddings _lowercase : Dict = layernorm_embedding super().__init__( pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, decoder_start_token_id=lowerCamelCase_, **lowerCamelCase_, )
21
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float: '''simple docstring''' if mass < 0: raise ValueError("""The mass of a body cannot be negative""" ) return 0.5 * mass * abs(UpperCamelCase_ ) * abs(UpperCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
343
0
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer _a : int= logging.get_logger(__name__) _a : List[Any]= {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _a : Optional[Any]= { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } _a : Union[str, Any]= { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } _a : Optional[int]= { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } _a : Union[str, Any]= { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } _a : int= { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } _a : List[str]= { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } _a : int= { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } _a : List[str]= { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } _a : int= { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class UpperCamelCase ( __lowerCAmelCase ): UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES UpperCAmelCase : Any = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[int] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCamelCase ( __lowerCAmelCase ): UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES UpperCAmelCase : Dict = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION _a : Dict= collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) _a : Union[str, Any]= collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) _a : str= R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(__lowerCAmelCase ) class UpperCamelCase : def __call__(self : Dict , _A : List[Any] , _A : Optional[str] = None , _A : Optional[str] = None , _A : Union[bool, str] = False , _A : Union[bool, str] = False , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , **_A : List[str] , ) -> Dict: if titles is None and texts is None: return super().__call__( lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , ) elif titles is None or texts is None: __snake_case : str = titles if texts is None else texts return super().__call__( lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , ) __snake_case : Union[str, Any] = titles if not isinstance(lowerCamelCase_ , lowerCamelCase_) else [titles] __snake_case : Any = texts if not isinstance(lowerCamelCase_ , lowerCamelCase_) else [texts] __snake_case : Union[str, Any] = len(lowerCamelCase_) __snake_case : Optional[int] = questions if not isinstance(lowerCamelCase_ , lowerCamelCase_) else [questions] * n_passages if len(lowerCamelCase_) != len(lowerCamelCase_): raise ValueError( f"There should be as many titles than texts but got {len(lowerCamelCase_)} titles and {len(lowerCamelCase_)} texts.") __snake_case : Optional[Any] = super().__call__(lowerCamelCase_ , lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_)['input_ids'] __snake_case : str = super().__call__(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_)['input_ids'] __snake_case : Optional[Any] = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCamelCase_ , lowerCamelCase_) ] } if return_attention_mask is not False: __snake_case : Optional[Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) __snake_case : Any = attention_mask return self.pad(lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_tensors=lowerCamelCase_) def _lowercase (self : str , _A : BatchEncoding , _A : DPRReaderOutput , _A : int = 16 , _A : int = 64 , _A : int = 4 , ) -> Union[str, Any]: __snake_case : Optional[int] = reader_input['input_ids'] __snake_case , __snake_case , __snake_case : Optional[Any] = reader_output[:3] __snake_case : Tuple = len(lowerCamelCase_) __snake_case : Dict = sorted(range(lowerCamelCase_) , reverse=lowerCamelCase_ , key=relevance_logits.__getitem__) __snake_case : str = [] for doc_id in sorted_docs: __snake_case : Tuple = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence __snake_case : List[str] = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __snake_case : Optional[Any] = sequence_ids.index(self.pad_token_id) else: __snake_case : Dict = len(lowerCamelCase_) __snake_case : Union[str, Any] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCamelCase_ , top_spans=lowerCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCamelCase_ , start_index=lowerCamelCase_ , end_index=lowerCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(lowerCamelCase_) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowercase (self : List[Any] , _A : List[int] , _A : List[int] , _A : int , _A : int , ) -> Tuple: __snake_case : int = [] for start_index, start_score in enumerate(lowerCamelCase_): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) __snake_case : Dict = sorted(lowerCamelCase_ , key=lambda _A: x[1] , reverse=lowerCamelCase_) __snake_case : Dict = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]") __snake_case : str = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f"Span is too long: {length} > {max_answer_length}") if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(lowerCamelCase_) == top_spans: break return chosen_span_intervals @add_end_docstrings(__lowerCAmelCase ) class UpperCamelCase ( __lowerCAmelCase , __lowerCAmelCase ): UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES UpperCAmelCase : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : str = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Dict = READER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : int = ["""input_ids""", """attention_mask"""]
172
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """trocr""" __lowerCAmelCase = ["""past_key_values"""] __lowerCAmelCase = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_ffn_dim UpperCamelCase = activation_function UpperCamelCase = max_position_embeddings UpperCamelCase = dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = init_std UpperCamelCase = decoder_layerdrop UpperCamelCase = use_cache UpperCamelCase = scale_embedding UpperCamelCase = use_learned_position_embeddings UpperCamelCase = layernorm_embedding super().__init__( pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
343
0
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer lowercase : Optional[Any] = 'bart' lowercase : int = True @st.cache(allow_output_mutation=UpperCamelCase_ ) def lowerCAmelCase_ ( ): '''simple docstring''' if LOAD_DENSE_INDEX: A : int = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) A : Union[str, Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) A : Optional[Any] = qar_model.eval() else: A, A : int = (None, None) if MODEL_TYPE == "bart": A : Dict = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) A : str = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) A : Any = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) A : Dict = sas_model.eval() else: A, A : str = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=UpperCamelCase_ ) def lowerCAmelCase_ ( ): '''simple docstring''' if LOAD_DENSE_INDEX: A : Union[str, Any] = faiss.StandardGpuResources() A : List[Any] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] A : Union[str, Any] = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) A : Optional[Any] = faiss.IndexFlatIP(128 ) A : Optional[int] = faiss.index_cpu_to_gpu(UpperCamelCase_ , 1 , UpperCamelCase_ ) wikiaab_gpu_index_flat.add(UpperCamelCase_ ) # TODO fix for larger GPU else: A, A : Optional[Any] = (None, None) A : Optional[int] = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=UpperCamelCase_ ) def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) A : Optional[Any] = elia['''train_eli5'''] A : Tuple = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) A : Union[str, Any] = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(UpperCamelCase_ ) return (elia_train, eli5_train_q_index) lowercase , lowercase , lowercase : Optional[int] = load_indexes() lowercase , lowercase , lowercase , lowercase : Optional[Any] = load_models() lowercase , lowercase : str = load_train_data() def lowerCAmelCase_ ( snake_case__ , snake_case__=10 ): '''simple docstring''' A : List[Any] = embed_questions_for_retrieval([question] , UpperCamelCase_ , UpperCamelCase_ ) A, A : Optional[int] = eli5_train_q_index.search(UpperCamelCase_ , UpperCamelCase_ ) A : Optional[Any] = [elia_train[int(UpperCamelCase_ )] for i in I[0]] return nn_examples def lowerCAmelCase_ ( snake_case__ , snake_case__="wiki40b" , snake_case__="dense" , snake_case__=10 ): '''simple docstring''' if source == "none": A, A : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": A, A : int = query_qa_dense_index( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) else: A, A : Union[str, Any] = query_es_index( UpperCamelCase_ , UpperCamelCase_ , index_name='''english_wiki40b_snippets_100w''' , n_results=UpperCamelCase_ , ) A : Tuple = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] A : Tuple = '''question: {} context: {}'''.format(UpperCamelCase_ , UpperCamelCase_ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda snake_case__ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case__ : None), } ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=64 , snake_case__=256 , snake_case__=False , snake_case__=2 , snake_case__=0.95 , snake_case__=0.8 ): '''simple docstring''' with torch.no_grad(): A : Any = qa_sas_generate( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_answers=1 , num_beams=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ , do_sample=UpperCamelCase_ , temp=UpperCamelCase_ , top_p=UpperCamelCase_ , top_k=UpperCamelCase_ , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar lowercase : int = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' lowercase : Tuple = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) lowercase : List[str] = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] lowercase : Any = st.sidebar.checkbox('Demo options') if demo_options: lowercase : Optional[int] = st.sidebar.selectbox( '', action_list, index=3, ) lowercase : Tuple = action_list.index(action_st) lowercase : Optional[int] = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) lowercase : Optional[Any] = show_type == 'Show full text of passages' else: lowercase : str = 3 lowercase : List[Any] = True lowercase : Union[str, Any] = st.sidebar.checkbox('Retrieval options') if retrieval_options: lowercase : Optional[int] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) lowercase : List[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) lowercase : str = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: lowercase : List[Any] = 'wiki40b' lowercase : Union[str, Any] = 'dense' lowercase : List[str] = 'beam' lowercase : str = 2 lowercase : Union[str, Any] = 64 lowercase : List[str] = 2_56 lowercase : List[str] = None lowercase : int = None lowercase : Optional[int] = st.sidebar.checkbox('Generation options') if generate_options: lowercase : str = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) lowercase : int = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) lowercase : List[str] = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None ) lowercase : Union[str, Any] = st.sidebar.slider( 'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None ) if sampled == "beam": lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: lowercase : int = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) lowercase : List[Any] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) lowercase : List[Any] = None # start main text lowercase : str = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] lowercase : int = st.selectbox( 'What would you like to ask? ---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": lowercase : str = st.text_input('Enter your question here:', '') else: lowercase : str = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": lowercase , lowercase : str = make_support(question, source=wiki_source, method='dense', n_results=10) lowercase , lowercase : Any = make_support(question, source=wiki_source, method='sparse', n_results=10) lowercase : Dict = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] lowercase : List[str] = support_list[:10] lowercase : Dict = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: lowercase , lowercase : Any = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: lowercase , lowercase : List[str] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): lowercase : Tuple = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) lowercase : Union[str, Any] = res[1].strip() if sec_titles == "": lowercase : Tuple = '[{}]({})'.format(res[0], wiki_url) else: lowercase : Optional[int] = sec_titles.split(' & ') lowercase : Tuple = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style=\"font-family:arial; font-size:10pt;\">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: lowercase : List[str] = find_nearest_training(question) lowercase : str = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) lowercase : Any = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) lowercase : List[str] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
3
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ): __lowerCAmelCase = """swin""" __lowerCAmelCase = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )] UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names ) class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return 1E-4
343
0
'''simple docstring''' from numpy import exp, pi, sqrt def lowercase_ ( _lowercase , _lowercase = 0.0 , _lowercase = 1.0 ) -> int: '''simple docstring''' return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
318
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _SCREAMING_SNAKE_CASE = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_2_8, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5) _SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) _SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img( """dataset/single_prediction/image.png""", target_size=(6_4, 6_4) ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image) _SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0) _SCREAMING_SNAKE_CASE = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _SCREAMING_SNAKE_CASE = """Normal""" if result[0][0] == 1: _SCREAMING_SNAKE_CASE = """Abnormality detected"""
343
0
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __A( __lowerCAmelCase ): snake_case_ = '''char''' snake_case_ = '''bpe''' snake_case_ = '''wp''' A : Dict = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __A( __lowerCAmelCase ): snake_case_ = ['''image_processor''', '''char_tokenizer'''] snake_case_ = '''ViTImageProcessor''' snake_case_ = '''MgpstrTokenizer''' def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> str: '''simple docstring''' __a = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowerCamelCase_ , ) __a = kwargs.pop('''feature_extractor''' ) __a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) __a = tokenizer __a = AutoTokenizer.from_pretrained('''gpt2''' ) __a = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(lowerCamelCase_ , lowerCamelCase_ ) def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]: '''simple docstring''' if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: __a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if text is not None: __a = self.char_tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if text is None: return inputs elif images is None: return encodings else: __a = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int: '''simple docstring''' __a , __a , __a = sequences __a = char_preds.size(0 ) __a , __a = self._decode_helper(lowerCamelCase_ , '''char''' ) __a , __a = self._decode_helper(lowerCamelCase_ , '''bpe''' ) __a , __a = self._decode_helper(lowerCamelCase_ , '''wp''' ) __a = [] __a = [] for i in range(lowerCamelCase_ ): __a = [char_scores[i], bpe_scores[i], wp_scores[i]] __a = [char_strs[i], bpe_strs[i], wp_strs[i]] __a = scores.index(max(lowerCamelCase_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __a = {} __a = final_strs __a = final_scores __a = char_strs __a = bpe_strs __a = wp_strs return out def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int: '''simple docstring''' if format == DecodeType.CHARACTER: __a = self.char_decode __a = 1 __a = '''[s]''' elif format == DecodeType.BPE: __a = self.bpe_decode __a = 2 __a = '''#''' elif format == DecodeType.WORDPIECE: __a = self.wp_decode __a = 102 __a = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) __a , __a = [], [] __a = pred_logits.size(0 ) __a = pred_logits.size(1 ) __a , __a = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase_ , sorted=lowerCamelCase_ ) __a = preds_index.view(-1 , lowerCamelCase_ )[:, 1:] __a = decoder(lowerCamelCase_ ) __a , __a = torch.nn.functional.softmax(lowerCamelCase_ , dim=2 ).max(dim=2 ) __a = preds_max_prob[:, 1:] for index in range(lowerCamelCase_ ): __a = preds_str[index].find(lowerCamelCase_ ) __a = preds_str[index][:pred_eos] __a = preds_index[index].cpu().tolist() __a = pred_index.index(lowerCamelCase_ ) if eos_token in pred_index else -1 __a = preds_max_prob[index][: pred_eos_index + 1] __a = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCamelCase_ ) conf_scores.append(lowerCamelCase_ ) return dec_strs, conf_scores def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase_ )] return decode_strs def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str: '''simple docstring''' return self.bpe_tokenizer.batch_decode(lowerCamelCase_ ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple: '''simple docstring''' __a = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase_ )] return decode_strs
6
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): pass class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = data UpperCamelCase = None def __iter__( self : Optional[int] ): """simple docstring""" UpperCamelCase = self UpperCamelCase = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCamelCase_ ) yield node.data UpperCamelCase = node.next_node @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": _SCREAMING_SNAKE_CASE = Node(1) _SCREAMING_SNAKE_CASE = Node(2) _SCREAMING_SNAKE_CASE = Node(3) _SCREAMING_SNAKE_CASE = Node(4) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = root_node.next_node print(root_node.has_loop) # True _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = Node(1) print(root_node.has_loop) # False
343
0
'''simple docstring''' import colorsys from PIL import Image # type: ignore def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] , lowercase : Optional[Any] ) -> float: _a = x _a = y for step in range(UpperCamelCase_ ): # noqa: B007 _a = a * a - b * b + x _a = 2 * a * b + y _a = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _lowerCamelCase ( lowercase : Dict ) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def _lowerCamelCase ( lowercase : int ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCamelCase_ , 1 , 1 ) ) def _lowerCamelCase ( lowercase : Union[str, Any] = 800 , lowercase : Optional[int] = 600 , lowercase : Optional[int] = -0.6 , lowercase : List[str] = 0 , lowercase : List[str] = 3.2 , lowercase : Optional[int] = 50 , lowercase : str = True , ) -> Image.Image: _a = Image.new("RGB" , (image_width, image_height) ) _a = img.load() # loop through the image-coordinates for image_x in range(UpperCamelCase_ ): for image_y in range(UpperCamelCase_ ): # determine the figure-coordinates based on the image-coordinates _a = figure_width / image_width * image_height _a = figure_center_x + (image_x / image_width - 0.5) * figure_width _a = figure_center_y + (image_y / image_height - 0.5) * figure_height _a = get_distance(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: _a = get_color_coded_rgb(UpperCamelCase_ ) else: _a = get_black_and_white_rgb(UpperCamelCase_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure lowerCAmelCase_ : Dict = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
63
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , ) assert hasattr(self , """env""" ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = { """enabled""": True, """processes_per_host""": 8, } UpperCamelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
343
0
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ : str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = XLNetTokenizer lowerCamelCase = XLNetTokenizerFast lowerCamelCase = True lowerCamelCase = True def UpperCAmelCase_ ( self ) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing A_ : str = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Dict = """<s>""" A_ : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(_lowerCamelCase ) , 1006 ) def UpperCAmelCase_ ( self ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : Union[str, Any] = XLNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) A_ : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] ) A_ : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A_ : List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) A_ : Dict = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : List[str] = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase ) A_ : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def UpperCAmelCase_ ( self ) -> List[Any]: A_ : Any = XLNetTokenizer(_lowerCamelCase , do_lower_case=_lowerCamelCase ) A_ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def UpperCAmelCase_ ( self ) -> int: A_ : Optional[int] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) A_ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase ) A_ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase ) A_ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) A_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: # fmt: off A_ : int = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
344
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { 'Visual-Attention-Network/van-base': ( 'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json' ), } class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''van''' def __init__( self , _lowerCamelCase=224 , _lowerCamelCase=3 , _lowerCamelCase=[7, 3, 3, 3] , _lowerCamelCase=[4, 2, 2, 2] , _lowerCamelCase=[64, 128, 320, 512] , _lowerCamelCase=[3, 3, 12, 3] , _lowerCamelCase=[8, 8, 4, 4] , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=1e-6 , _lowerCamelCase=1e-2 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , **_lowerCamelCase , ) -> List[str]: super().__init__(**_lowerCamelCase ) A_ : Dict = image_size A_ : Dict = num_channels A_ : Optional[int] = patch_sizes A_ : List[Any] = strides A_ : Union[str, Any] = hidden_sizes A_ : List[str] = depths A_ : Any = mlp_ratios A_ : List[str] = hidden_act A_ : Dict = initializer_range A_ : int = layer_norm_eps A_ : Any = layer_scale_init_value A_ : Optional[Any] = drop_path_rate A_ : Dict = dropout_rate
344
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''distilbert''' lowerCamelCase = { '''hidden_size''': '''dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', } def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]: A_ : Tuple = vocab_size A_ : List[Any] = max_position_embeddings A_ : int = sinusoidal_pos_embds A_ : int = n_layers A_ : str = n_heads A_ : Optional[int] = dim A_ : int = hidden_dim A_ : Tuple = dropout A_ : List[Any] = attention_dropout A_ : int = activation A_ : Dict = initializer_range A_ : List[Any] = qa_dropout A_ : int = seq_classif_dropout super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase ) class _lowerCAmelCase ( __A ): """simple docstring""" @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A_ : int = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
344
1
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''EncodecFeatureExtractor''' lowerCamelCase = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: super().__init__(_lowerCamelCase , _lowerCamelCase ) A_ : int = self.feature_extractor A_ : List[str] = False def UpperCAmelCase_ ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Optional[Any]: return self.tokenizer.get_decoder_prompt_ids(task=_lowerCamelCase , language=_lowerCamelCase , no_timestamps=_lowerCamelCase ) def __call__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowerCamelCase , **_lowerCamelCase ) A_ : int = kwargs.pop("""audio""" , _lowerCamelCase ) A_ : str = kwargs.pop("""sampling_rate""" , _lowerCamelCase ) A_ : Optional[Any] = kwargs.pop("""text""" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: A_ : List[str] = args[0] A_ : Any = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if text is not None: A_ : Any = self.tokenizer(_lowerCamelCase , **_lowerCamelCase ) if audio is not None: A_ : Tuple = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: A_ : Tuple = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: A_ : int = audio_inputs["""padding_mask"""] return inputs def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> Any: A_ : Optional[int] = kwargs.pop("""audio""" , _lowerCamelCase ) A_ : List[str] = kwargs.pop("""padding_mask""" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: A_ : Dict = args[0] A_ : Optional[Any] = args[1:] if audio_values is not None: return self._decode_audio(_lowerCamelCase , padding_mask=_lowerCamelCase ) else: return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[Any]: return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[np.ndarray]: A_ : Any = to_numpy(_lowerCamelCase ) A_ , A_ , A_ : Optional[Any] = audio_values.shape if padding_mask is None: return list(_lowerCamelCase ) A_ : Union[str, Any] = to_numpy(_lowerCamelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) A_ : Union[str, Any] = seq_len - padding_mask.shape[-1] A_ : Optional[int] = 1 - self.feature_extractor.padding_value A_ : List[str] = np.pad(_lowerCamelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=_lowerCamelCase ) A_ : List[str] = audio_values.tolist() for i in range(_lowerCamelCase ): A_ : Optional[Any] = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] A_ : Tuple = sliced_audio.reshape(_lowerCamelCase , -1 ) return audio_values
344
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) UpperCamelCase__ : Any = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]: """simple docstring""" A_ : int = state_dict.pop(a_ ) A_ : Tuple = val def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" A_ : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) A_ : str = value else: A_ : int = value return new_state_dict def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]: """simple docstring""" A_ : List[Any] = """""" if is_panoptic: A_ : Any = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict A_ : Optional[Any] = in_proj_weight[:2_5_6, :] A_ : Tuple = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : int = in_proj_bias[2_5_6:5_1_2] A_ : int = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] def UpperCAmelCase ( ) -> Dict: """simple docstring""" A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw ) return im @torch.no_grad() def UpperCAmelCase ( a_ , a_ ) -> Dict: """simple docstring""" A_ : int = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: A_ : str = """resnet101""" if "dc5" in model_name: A_ : List[Any] = True A_ : str = """panoptic""" in model_name if is_panoptic: A_ : Dict = 2_5_0 else: A_ : Union[str, Any] = 9_1 A_ : str = """huggingface/label-files""" A_ : Union[str, Any] = """coco-detection-id2label.json""" A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) ) A_ : str = {int(a_ ): v for k, v in idalabel.items()} A_ : Optional[int] = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} # load image processor A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection""" A_ : Any = ConditionalDetrImageProcessor(format=a_ ) # prepare image A_ : Tuple = prepare_img() A_ : Any = image_processor(images=a_ , return_tensors="""pt""" ) A_ : Optional[int] = encoding["""pixel_values"""] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval() A_ : List[Any] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: A_ : Union[str, Any] = """conditional_detr.""" + src rename_key(a_ , a_ , a_ ) A_ : Any = rename_backbone_keys(a_ ) # query, key and value matrices need special treatment read_in_q_k_v(a_ , is_panoptic=a_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): A_ : Dict = state_dict.pop(a_ ) A_ : List[Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: A_ : str = state_dict.pop(a_ ) A_ : Any = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: A_ : Optional[int] = state_dict.pop(a_ ) A_ : str = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): A_ : Tuple = state_dict.pop(a_ ) A_ : Dict = val # finally, create HuggingFace model and load state dict A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ ) model.load_state_dict(a_ ) model.eval() model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion A_ : str = conditional_detr(a_ ) A_ : str = model(a_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(a_ ).mkdir(exist_ok=a_ ) model.save_pretrained(a_ ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": UpperCamelCase__ : int = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) UpperCamelCase__ : Optional[Any] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
344
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ : Tuple = { 'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ 'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimesformerModel', 'TimesformerForVideoClassification', 'TimesformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = (UnCLIPScheduler,) def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]: A_ : Union[str, Any] = { """num_train_timesteps""": 1000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_lowerCamelCase ) return config def UpperCAmelCase_ ( self ) -> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> str: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> str: for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: A_ : Optional[int] = self.scheduler_classes[0] A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" ) A_ : List[Any] = scheduler_class(**_lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5 def UpperCAmelCase_ ( self ) -> Optional[int]: A_ : List[Any] = self.scheduler_classes[0] A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" ) A_ : Dict = scheduler_class(**_lowerCamelCase ) A_ : Dict = 0.5 assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5 def UpperCAmelCase_ ( self ) -> Any: A_ : Optional[Any] = self.scheduler_classes[0] A_ : Tuple = self.get_scheduler_config() A_ : Optional[Any] = scheduler_class(**_lowerCamelCase ) A_ : int = scheduler.timesteps A_ : List[Any] = self.dummy_model() A_ : str = self.dummy_sample_deter A_ : Optional[Any] = torch.manual_seed(0 ) for i, t in enumerate(_lowerCamelCase ): # 1. predict noise residual A_ : Any = model(_lowerCamelCase , _lowerCamelCase ) # 2. predict previous mean of sample x_t-1 A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample A_ : List[Any] = pred_prev_sample A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1e-2 assert abs(result_mean.item() - 0.328_4743 ) < 1e-3 def UpperCAmelCase_ ( self ) -> Dict: A_ : Union[str, Any] = self.scheduler_classes[0] A_ : Dict = self.get_scheduler_config() A_ : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(25 ) A_ : List[str] = scheduler.timesteps A_ : List[Any] = self.dummy_model() A_ : List[Any] = self.dummy_sample_deter A_ : List[Any] = torch.manual_seed(0 ) for i, t in enumerate(_lowerCamelCase ): # 1. predict noise residual A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase ) if i + 1 == timesteps.shape[0]: A_ : List[str] = None else: A_ : Dict = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 A_ : str = scheduler.step( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample A_ : Optional[Any] = pred_prev_sample A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1e-2 assert abs(result_mean.item() - 0.336_2038 ) < 1e-3 def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass def UpperCAmelCase_ ( self ) -> int: pass
344
1
'''simple docstring''' import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase__ : List[Any] = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCamelCase__ : List[Any] = logging.get_logger(__name__) class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''mask2former''' lowerCamelCase = ['''swin'''] lowerCamelCase = {'''hidden_size''': '''hidden_dim'''} def __init__( self , _lowerCamelCase = None , _lowerCamelCase = 256 , _lowerCamelCase = 256 , _lowerCamelCase = 256 , _lowerCamelCase = 1024 , _lowerCamelCase = "relu" , _lowerCamelCase = 6 , _lowerCamelCase = 10 , _lowerCamelCase = 8 , _lowerCamelCase = 0.0 , _lowerCamelCase = 2048 , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = 4 , _lowerCamelCase = 255 , _lowerCamelCase = 100 , _lowerCamelCase = 0.1 , _lowerCamelCase = 2.0 , _lowerCamelCase = 5.0 , _lowerCamelCase = 5.0 , _lowerCamelCase = 1_2544 , _lowerCamelCase = 3.0 , _lowerCamelCase = 0.75 , _lowerCamelCase = 0.02 , _lowerCamelCase = 1.0 , _lowerCamelCase = True , _lowerCamelCase = [4, 8, 16, 32] , _lowerCamelCase = None , **_lowerCamelCase , ) -> str: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" ) A_ : str = CONFIG_MAPPING["""swin"""]( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_lowerCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : Dict = backbone_config.pop("""model_type""" ) A_ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] A_ : List[Any] = config_class.from_dict(_lowerCamelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. " F"Supported model types: {','.join(self.backbones_supported )}" ) A_ : Any = backbone_config A_ : Optional[Any] = feature_size A_ : str = mask_feature_size A_ : Optional[Any] = hidden_dim A_ : List[Any] = encoder_feedforward_dim A_ : List[Any] = activation_function A_ : Optional[Any] = encoder_layers A_ : Optional[Any] = decoder_layers A_ : str = num_attention_heads A_ : int = dropout A_ : int = dim_feedforward A_ : Optional[Any] = pre_norm A_ : str = enforce_input_projection A_ : List[str] = common_stride A_ : str = ignore_value A_ : Any = num_queries A_ : int = no_object_weight A_ : int = class_weight A_ : int = mask_weight A_ : List[Any] = dice_weight A_ : List[Any] = train_num_points A_ : Optional[Any] = oversample_ratio A_ : Optional[int] = importance_sample_ratio A_ : Tuple = init_std A_ : Tuple = init_xavier_std A_ : List[Any] = use_auxiliary_loss A_ : List[Any] = feature_strides A_ : List[Any] = output_auxiliary_logits A_ : Tuple = decoder_layers super().__init__(**_lowerCamelCase ) @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> List[str]: return cls( backbone_config=_lowerCamelCase , **_lowerCamelCase , ) def UpperCAmelCase_ ( self ) -> Dict[str, any]: A_ : str = copy.deepcopy(self.__dict__ ) A_ : str = self.backbone_config.to_dict() A_ : int = self.__class__.model_type return output
344
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=False , ) -> Optional[int]: A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20} A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} A_ : Optional[Any] = parent A_ : Optional[int] = batch_size A_ : Union[str, Any] = num_channels A_ : str = image_size A_ : Tuple = min_resolution A_ : Dict = max_resolution A_ : str = do_resize A_ : Tuple = size A_ : int = do_center_crop A_ : Dict = crop_size A_ : Tuple = do_normalize A_ : List[str] = image_mean A_ : Optional[Any] = image_std A_ : Any = do_reduce_labels def UpperCAmelCase_ ( self ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def UpperCAmelCase ( ) -> List[str]: """simple docstring""" A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" ) A_ : Tuple = Image.open(dataset[0]["""file"""] ) A_ : Dict = Image.open(dataset[1]["""file"""] ) return image, map def UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" ) A_ : Tuple = Image.open(ds[0]["""file"""] ) A_ : List[Any] = Image.open(ds[1]["""file"""] ) A_ : Any = Image.open(ds[2]["""file"""] ) A_ : str = Image.open(ds[3]["""file"""] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = BeitImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ) -> Dict: A_ : List[Any] = BeitImageProcessingTester(self ) @property def UpperCAmelCase_ ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ) -> Optional[int]: A_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase ) A_ : int = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass def UpperCAmelCase_ ( self ) -> Dict: # Initialize image_processing A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ) -> List[str]: # Initialize image_processing A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ) -> str: # Initialize image_processing A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ) -> Optional[int]: # Initialize image_processing A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) A_ : Optional[int] = [] for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( 1, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) # Test batched A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) # Test not batched input (PIL images) A_ , A_ : List[Any] = prepare_semantic_single_inputs() A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( 1, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) # Test batched input (PIL images) A_ , A_ : str = prepare_semantic_batch_inputs() A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( 2, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) def UpperCAmelCase_ ( self ) -> Tuple: # Initialize image_processing A_ : Any = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 A_ , A_ : Tuple = prepare_semantic_single_inputs() A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 150 ) A_ : str = True A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 )
344
1
'''simple docstring''' def UpperCAmelCase ( a_ ) -> bool: """simple docstring""" if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) A_ : List[str] = sorted(string.lower() ) return len(a_ ) == len(set(a_ ) ) if __name__ == "__main__": UpperCamelCase__ : int = input('Enter a string ').strip() UpperCamelCase__ : List[Any] = is_isogram(input_str) print(f'{input_str} is {"an" if isogram else "not an"} isogram.')
344
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class _lowerCAmelCase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str: super().__init__() A_ : Optional[Any] = pad_token_id A_ : List[Any] = max_length A_ : str = vocab A_ : Union[str, Any] = merges A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase ) @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int: A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()] A_ : Dict = tokenizer.get_vocab() return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str: A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]: return cls(**_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any: A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase ) A_ : Any = tf.ones_like(_lowerCamelCase ) if self.pad_token_id is not None: # pad the tokens up to max length A_ : List[Any] = max_length if max_length is not None else self.max_length if max_length is not None: A_ , A_ : Tuple = pad_model_inputs( _lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
344
1
'''simple docstring''' def UpperCAmelCase ( a_ ) -> bool: """simple docstring""" return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') UpperCamelCase__ : Tuple = int(input('Enter number: ').strip()) print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
344
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : int = ['YolosFeatureExtractor'] UpperCamelCase__ : int = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
1
'''simple docstring''' from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class _lowerCAmelCase ( __A ): """simple docstring""" def __lt__( self , _lowerCamelCase ) -> List[Any]: return self[-1] < other[-1] def __eq__( self , _lowerCamelCase ) -> List[Any]: return self[-1] == other[-1] def UpperCAmelCase ( a_ ) -> list: """simple docstring""" A_ : list[Stack] = [] # sort into stacks for element in collection: A_ : Any = Stack([element] ) A_ : Optional[int] = bisect_left(a_ , a_ ) if i != len(a_ ): stacks[i].append(a_ ) else: stacks.append(a_ ) # use a heap-based merge to merge stack efficiently A_ : Any = merge(*(reversed(a_ ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip() UpperCamelCase__ : List[str] = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
344
'''simple docstring''' class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: A_ : Optional[Any] = name A_ : Dict = value A_ : Union[str, Any] = weight def __repr__( self ) -> List[str]: return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})" def UpperCAmelCase_ ( self ) -> Optional[Any]: return self.value def UpperCAmelCase_ ( self ) -> List[str]: return self.name def UpperCAmelCase_ ( self ) -> Tuple: return self.weight def UpperCAmelCase_ ( self ) -> Optional[int]: return self.value / self.weight def UpperCAmelCase ( a_ , a_ , a_ ) -> str: """simple docstring""" A_ : Optional[int] = [] for i in range(len(a_ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]: """simple docstring""" A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ ) A_ : str = [] A_ , A_ : Dict = 0.0, 0.0 for i in range(len(a_ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def UpperCAmelCase ( ) -> Tuple: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
344
1
'''simple docstring''' def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" stooge(a_ , 0 , len(a_ ) - 1 ) return arr def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]: """simple docstring""" if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: A_ , A_ : int = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: A_ : Optional[int] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(a_ , a_ , (h - t) ) # Recursively sort last 2/3 elements stooge(a_ , i + t , (a_) ) # Recursively sort first 2/3 elements stooge(a_ , a_ , (h - t) ) if __name__ == "__main__": UpperCamelCase__ : List[Any] = input('Enter numbers separated by a comma:\n').strip() UpperCamelCase__ : Union[str, Any] = [int(item) for item in user_input.split(',')] print(stooge_sort(unsorted))
344
'''simple docstring''' from __future__ import annotations from math import pi, sqrt def UpperCAmelCase ( a_ , a_ ) -> tuple: """simple docstring""" if inductance <= 0: raise ValueError("""Inductance cannot be 0 or negative""" ) elif capacitance <= 0: raise ValueError("""Capacitance cannot be 0 or negative""" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
344
1
'''simple docstring''' UpperCamelCase__ : List[Any] = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> Optional[int]: """simple docstring""" A_ : Any = [False] * len(a_ ) A_ : List[str] = [s] A_ : int = True while queue: A_ : Tuple = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(a_ ) A_ : Union[str, Any] = True A_ : List[str] = u return visited[t] def UpperCAmelCase ( a_ , a_ , a_ ) -> int: """simple docstring""" A_ : Optional[Any] = [-1] * (len(a_ )) A_ : List[Any] = 0 A_ : List[str] = [] A_ : List[Any] = [i[:] for i in graph] # Record original cut, copy. while bfs(a_ , a_ , a_ , a_ ): A_ : Any = float("""Inf""" ) A_ : Tuple = sink while s != source: # Find the minimum value in select path A_ : Optional[int] = min(a_ , graph[parent[s]][s] ) A_ : Tuple = parent[s] max_flow += path_flow A_ : str = sink while v != source: A_ : List[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow A_ : str = parent[v] for i in range(len(a_ ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
344
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _lowerCAmelCase ( __A ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any: A_ : List[Any] = parent A_ : int = config_class A_ : int = has_text_modality A_ : str = kwargs A_ : int = common_properties def UpperCAmelCase_ ( self ) -> str: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : Optional[int] = ( ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["""vocab_size"""] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(_lowerCamelCase ): try: setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_lowerCamelCase ): try: A_ : List[str] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def UpperCAmelCase_ ( self ) -> Tuple: A_ : Any = self.config_class(**self.inputs_dict ) A_ : Optional[int] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : str = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" ) config_first.to_json_file(_lowerCamelCase ) A_ : Dict = self.config_class.from_json_file(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : Any = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_lowerCamelCase ) A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : List[Any] = """test""" with tempfile.TemporaryDirectory() as tmpdirname: A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) config_first.save_pretrained(_lowerCamelCase ) A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) A_ : str = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_class.is_composition: return A_ : Dict = self.config_class() self.parent.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: A_ : Any = copy.deepcopy(_lowerCamelCase ) A_ : Tuple = self.config_class(**_lowerCamelCase ) A_ : Optional[Any] = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) ) elif getattr(_lowerCamelCase , _lowerCamelCase ) != value: wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) ) if len(_lowerCamelCase ) > 0: A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def UpperCAmelCase_ ( self ) -> Optional[int]: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
344
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ : List[str] = { 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = [ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) def UpperCAmelCase ( a_ , a_ ) -> Optional[int]: """simple docstring""" try: with open(a_ , """rb""" ) as flax_state_f: A_ : Tuple = from_bytes(a_ , flax_state_f.read() ) except UnpicklingError as e: try: with open(a_ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(a_ , a_ ) def UpperCAmelCase ( a_ , a_ ) -> Any: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda a_ : x.dtype == jnp.bfloataa , a_ ) ).values() if any(a_ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) A_ : str = jax.tree_util.tree_map( lambda a_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ ) A_ : Any = """""" A_ : Optional[int] = flatten_dict(a_ , sep=""".""" ) A_ : List[str] = pt_model.state_dict() # keep track of unexpected & missing keys A_ : Union[str, Any] = [] A_ : Dict = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A_ : List[Any] = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""] A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": A_ : int = flax_key_tuple_array[:-1] + ["""weight"""] A_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(a_ ): A_ : Tuple = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) A_ : Dict = """.""".join(a_ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." ) else: # add weight to pytorch dict A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor A_ : Tuple = torch.from_numpy(a_ ) # remove from missing keys missing_keys.remove(a_ ) else: # weight is not expected by PyTorch model unexpected_keys.append(a_ ) pt_model.load_state_dict(a_ ) # re-transform missing_keys to list A_ : Dict = list(a_ ) if len(a_ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(a_ ) > 0: logger.warning( F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" """ use it for predictions and inference.""" ) return pt_model
344
1
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''Wav2Vec2FeatureExtractor''' lowerCamelCase = '''AutoTokenizer''' def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> Dict: super().__init__(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = self.feature_extractor A_ : Dict = False @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> List[Any]: try: return super().from_pretrained(_lowerCamelCase , **_lowerCamelCase ) except OSError: warnings.warn( F"Loading a tokenizer inside {cls.__name__} from a config that does not" """ include a `tokenizer_class` attribute is deprecated and will be """ """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`""" """ attribute to either your `config.json` or `tokenizer_config.json` """ """file to suppress this warning: """ , _lowerCamelCase , ) A_ : str = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[Any] = WavaVecaCTCTokenizer.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) return cls(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) def __call__( self , *_lowerCamelCase , **_lowerCamelCase ) -> Optional[int]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowerCamelCase , **_lowerCamelCase ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" ) A_ : Union[str, Any] = kwargs.pop("""raw_speech""" ) else: A_ : Tuple = kwargs.pop("""audio""" , _lowerCamelCase ) A_ : List[Any] = kwargs.pop("""sampling_rate""" , _lowerCamelCase ) A_ : int = kwargs.pop("""text""" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: A_ : str = args[0] A_ : Tuple = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: A_ : Any = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase ) if text is not None: A_ : Dict = self.tokenizer(_lowerCamelCase , **_lowerCamelCase ) if text is None: return inputs elif audio is None: return encodings else: A_ : Union[str, Any] = encodings["""input_ids"""] return inputs def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> Union[str, Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase ) A_ : str = kwargs.pop("""input_features""" , _lowerCamelCase ) A_ : Any = kwargs.pop("""labels""" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: A_ : Optional[int] = args[0] A_ : Any = args[1:] if input_features is not None: A_ : str = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) if labels is not None: A_ : Optional[int] = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: A_ : List[Any] = labels["""input_ids"""] return input_features def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> List[Any]: return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> List[Any]: return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @contextmanager def UpperCAmelCase_ ( self ) -> List[str]: warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) A_ : List[str] = True A_ : Optional[Any] = self.tokenizer yield A_ : Any = self.feature_extractor A_ : Union[str, Any] = False
344
'''simple docstring''' from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]: A_ : Any = parent A_ : List[Any] = batch_size A_ : List[Any] = image_size A_ : Optional[int] = num_channels A_ : Tuple = embeddings_size A_ : str = hidden_sizes A_ : Optional[Any] = depths A_ : Any = is_training A_ : int = use_labels A_ : int = hidden_act A_ : Optional[Any] = num_labels A_ : str = scope A_ : Optional[int] = len(_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Dict = None if self.use_labels: A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) A_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ) -> Optional[Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: A_ : Dict = TFRegNetModel(config=_lowerCamelCase ) A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: A_ : Optional[Any] = self.num_labels A_ : int = TFRegNetForImageClassification(_lowerCamelCase ) A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self ) -> str: A_ : Any = self.prepare_config_and_inputs() A_ , A_ , A_ : str = config_and_inputs A_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class _lowerCAmelCase ( __A, __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Dict = TFRegNetModelTester(self ) A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> str: return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> Dict: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def UpperCAmelCase_ ( self ) -> int: super().test_keras_fit() @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def UpperCAmelCase_ ( self ) -> Optional[Any]: pass def UpperCAmelCase_ ( self ) -> int: A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(_lowerCamelCase ) A_ : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A_ : Optional[int] = model_class(_lowerCamelCase ) A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase ) A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[str] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Dict = layer_type A_ : List[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : str = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ): A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple() def recursive_check(_lowerCamelCase , _lowerCamelCase ): if isinstance(_lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ): recursive_check(_lowerCamelCase , _lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=( """Tuple and dict output are not equal. Difference:""" F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}" ) , ) recursive_check(_lowerCamelCase , _lowerCamelCase ) for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(_lowerCamelCase ) A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} ) A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} ) def UpperCAmelCase_ ( self ) -> str: A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def UpperCAmelCase_ ( self ) -> Tuple: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase ( ) -> Tuple: """simple docstring""" A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase_ ( self ) -> int: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A_ : Tuple = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" ) # forward pass A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase ) # verify the logits A_ : Optional[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
344
1
'''simple docstring''' UpperCamelCase__ : dict[str, float] = { "joule": 1.0, "kilojoule": 1_000, "megajoule": 1_000_000, "gigajoule": 1_000_000_000, "wattsecond": 1.0, "watthour": 3_600, "kilowatthour": 3_600_000, "newtonmeter": 1.0, "calorie_nutr": 4_186.8, "kilocalorie_nutr": 4_186_800.00, "electronvolt": 1.602_176_634e-19, "britishthermalunit_it": 1_055.05_585, "footpound": 1.3_5_5_8_1_8, } def UpperCAmelCase ( a_ , a_ , a_ ) -> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: A_ : Tuple = ( F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" F"Valid values are: {', '.join(a_ )}" ) raise ValueError(a_ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
344
'''simple docstring''' def UpperCAmelCase ( a_ = 1_0_0 ) -> int: """simple docstring""" A_ : Dict = n * (n + 1) * (2 * n + 1) / 6 A_ : Optional[int] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'{solution() = }')
344
1
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _lowerCAmelCase ( __A ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any: A_ : List[Any] = parent A_ : int = config_class A_ : int = has_text_modality A_ : str = kwargs A_ : int = common_properties def UpperCAmelCase_ ( self ) -> str: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : Optional[int] = ( ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["""vocab_size"""] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(_lowerCamelCase ): try: setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_lowerCamelCase ): try: A_ : List[str] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def UpperCAmelCase_ ( self ) -> Tuple: A_ : Any = self.config_class(**self.inputs_dict ) A_ : Optional[int] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : str = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" ) config_first.to_json_file(_lowerCamelCase ) A_ : Dict = self.config_class.from_json_file(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : Any = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_lowerCamelCase ) A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : List[Any] = """test""" with tempfile.TemporaryDirectory() as tmpdirname: A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) config_first.save_pretrained(_lowerCamelCase ) A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) A_ : str = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_class.is_composition: return A_ : Dict = self.config_class() self.parent.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: A_ : Any = copy.deepcopy(_lowerCamelCase ) A_ : Tuple = self.config_class(**_lowerCamelCase ) A_ : Optional[Any] = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) ) elif getattr(_lowerCamelCase , _lowerCamelCase ) != value: wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) ) if len(_lowerCamelCase ) > 0: A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def UpperCAmelCase_ ( self ) -> Optional[int]: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
344
'''simple docstring''' from typing import TYPE_CHECKING from ..utils import _LazyModule UpperCamelCase__ : int = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
1
'''simple docstring''' import string from math import logaa def UpperCAmelCase ( a_ , a_ ) -> int: """simple docstring""" A_ : str = document.translate( str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" ) A_ : List[Any] = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def UpperCAmelCase ( a_ , a_ ) -> tuple[int, int]: """simple docstring""" A_ : Dict = corpus.lower().translate( str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with '' A_ : Optional[Any] = corpus_without_punctuation.split("""\n""" ) A_ : int = term.lower() return (len([doc for doc in docs if term in doc] ), len(a_ )) def UpperCAmelCase ( a_ , a_ , a_=False ) -> float: """simple docstring""" if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) , 3 ) def UpperCAmelCase ( a_ , a_ ) -> float: """simple docstring""" return round(tf * idf , 3 )
344
'''simple docstring''' from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def UpperCAmelCase ( a_ ) -> Dict[str, torch.Tensor]: """simple docstring""" A_ : List[str] = [] A_ : Dict = [] A_ : List[Any] = [] for rt in rc.restypes: A_ : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) A_ : Union[str, Any] = {name: i for i, name in enumerate(a_ )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 1_4 ) restype_atomaa_to_atomaa_list.append([0] * 3_7 ) restype_atomaa_mask_list.append([0.0] * 1_4 ) A_ : Tuple = torch.tensor( a_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) A_ : Optional[int] = torch.tensor( a_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) A_ : List[Any] = torch.tensor( a_ , dtype=torch.floataa , device=protein["""aatype"""].device , ) A_ : Optional[int] = protein["""aatype"""].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein A_ : Dict = restype_atomaa_to_atomaa[protein_aatype] A_ : Optional[Any] = restype_atomaa_mask[protein_aatype] A_ : Any = residx_atomaa_mask A_ : List[str] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back A_ : Tuple = restype_atomaa_to_atomaa[protein_aatype] A_ : Tuple = residx_atomaa_to_atomaa.long() # create the corresponding mask A_ : Optional[Any] = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["""aatype"""].device ) for restype, restype_letter in enumerate(rc.restypes ): A_ : Optional[Any] = rc.restype_atoa[restype_letter] A_ : Any = rc.residue_atoms[restype_name] for atom_name in atom_names: A_ : Any = rc.atom_order[atom_name] A_ : Optional[int] = 1 A_ : Optional[int] = restype_atomaa_mask[protein_aatype] A_ : Dict = residx_atomaa_mask return protein def UpperCAmelCase ( a_ ) -> Dict[str, np.ndarray]: """simple docstring""" A_ : Union[str, Any] = tree_map(lambda a_ : torch.tensor(a_ , device=batch["""aatype"""].device ) , a_ , np.ndarray ) A_ : Optional[int] = tensor_tree_map(lambda a_ : np.array(a_ ) , make_atomaa_masks(a_ ) ) return out
344
1
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model') @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = GPTSwaTokenizer lowerCamelCase = False lowerCamelCase = True lowerCamelCase = False def UpperCAmelCase_ ( self ) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing A_ : Tuple = GPTSwaTokenizer(_lowerCamelCase , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Optional[int]: A_ : str = """This is a test""" A_ : Dict = """This is a test""" return input_text, output_text def UpperCAmelCase_ ( self ) -> Tuple: A_ : Optional[Any] = """<s>""" A_ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(_lowerCamelCase ) , 2000 ) def UpperCAmelCase_ ( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def UpperCAmelCase_ ( self ) -> Dict: A_ : Dict = GPTSwaTokenizer(_lowerCamelCase ) A_ : List[str] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [465, 287, 265, 631, 842] ) A_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) # fmt: off self.assertListEqual( _lowerCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , ) # fmt: on A_ : List[str] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) A_ : Optional[Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) # fmt: off self.assertListEqual( _lowerCamelCase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ) # fmt: on def UpperCAmelCase_ ( self ) -> int: A_ : Any = GPTSwaTokenizer(_lowerCamelCase ) A_ : Optional[int] = ["""This is a test""", """I was born in 92000, and this is falsé."""] A_ : Dict = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(_lowerCamelCase , _lowerCamelCase ): self.assertListEqual(tokenizer.encode_fast(_lowerCamelCase ) , _lowerCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(_lowerCamelCase , _lowerCamelCase ): self.assertEqual(tokenizer.decode_fast(_lowerCamelCase ) , _lowerCamelCase ) @slow def UpperCAmelCase_ ( self ) -> Tuple: A_ : Dict = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off A_ : Dict = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=_lowerCamelCase , )
344
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str: A_ : Optional[int] = parent A_ : Dict = batch_size A_ : List[Any] = image_size A_ : Optional[int] = patch_size A_ : List[str] = num_channels A_ : List[Any] = is_training A_ : Union[str, Any] = use_labels A_ : Union[str, Any] = hidden_size A_ : str = num_hidden_layers A_ : List[str] = num_attention_heads A_ : Union[str, Any] = intermediate_size A_ : Any = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : Dict = type_sequence_label_size A_ : Optional[int] = initializer_range A_ : str = scope A_ : Optional[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A_ : Tuple = (image_size // patch_size) ** 2 A_ : Union[str, Any] = num_patches + 2 def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Dict = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ) -> int: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: A_ : List[str] = DeiTModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Dict = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A_ : Dict = 1 A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : int = model(_lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: A_ : Tuple = self.type_sequence_label_size A_ : Tuple = DeiTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : Dict = 1 A_ : Any = DeiTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : List[Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __A, __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowerCamelCase = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : int = DeiTModelTester(self ) A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> Optional[int]: pass def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[str] = model_class(_lowerCamelCase ) A_ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Union[str, Any] = [*signature.parameters.keys()] A_ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]: A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase_ ( self ) -> Optional[Any]: if not self.model_tester.is_training: return A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Optional[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A_ : List[str] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : List[str] = model(**_lowerCamelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> int: A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A_ : Any = False A_ : Union[str, Any] = True for model_class in self.all_model_classes: if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A_ : List[Any] = model_class(_lowerCamelCase ) model.gradient_checkpointing_enable() model.to(_lowerCamelCase ) model.train() A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : Union[str, Any] = model(**_lowerCamelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> Tuple: A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Optional[Any] = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_lowerCamelCase ), *get_values(_lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ): A_ : Dict = problem_type["""title"""] A_ : List[Any] = problem_type["""num_labels"""] A_ : List[str] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) if problem_type["num_labels"] > 1: A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list: A_ : List[str] = model(**_lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def UpperCAmelCase_ ( self ) -> Tuple: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : int = DeiTModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase ( ) -> Tuple: """simple docstring""" A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase_ ( self ) -> Optional[Any]: return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self ) -> Tuple: A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( _lowerCamelCase ) A_ : Optional[int] = self.default_image_processor A_ : str = prepare_img() A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Any = model(**_lowerCamelCase ) # verify the logits A_ : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase_ ( self ) -> Tuple: A_ : Optional[Any] = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" ) A_ : int = self.default_image_processor A_ : List[str] = prepare_img() A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ) A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A_ : List[Any] = model(_lowerCamelCase )
344
1
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def UpperCAmelCase ( ) -> Any: """simple docstring""" raise RuntimeError("""CUDA out of memory.""" ) class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self ) -> Tuple: super().__init__() A_ : Union[str, Any] = nn.Linear(3 , 4 ) A_ : int = nn.BatchNormad(4 ) A_ : str = nn.Linear(4 , 5 ) def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]: return self.lineara(self.batchnorm(self.lineara(_lowerCamelCase ) ) ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ) -> List[str]: A_ : List[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(_lowerCamelCase ): nonlocal batch_sizes batch_sizes.append(_lowerCamelCase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_lowerCamelCase , [128, 64, 32, 16, 8] ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Tuple = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(_lowerCamelCase , _lowerCamelCase ): nonlocal batch_sizes batch_sizes.append(_lowerCamelCase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga A_ , A_ : List[Any] = mock_training_loop_function("""hello""" ) self.assertListEqual(_lowerCamelCase , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def UpperCAmelCase_ ( self ) -> List[str]: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_lowerCamelCase ): pass with self.assertRaises(_lowerCamelCase ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def UpperCAmelCase_ ( self ) -> List[Any]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowerCamelCase ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_lowerCamelCase ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def UpperCAmelCase_ ( self ) -> Any: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_lowerCamelCase ) as cm: mock_training_loop_function(128 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] ) def UpperCAmelCase_ ( self ) -> List[Any]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowerCamelCase ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(_lowerCamelCase ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Union[str, Any] = torch.cuda.memory_allocated() A_ : Any = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _lowerCamelCase ) A_ : Dict = release_memory(_lowerCamelCase ) self.assertEqual(torch.cuda.memory_allocated() , _lowerCamelCase )
344
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 32 , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCamelCase = True , _lowerCamelCase=7 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=3 , ) -> Union[str, Any]: A_ : Optional[int] = parent A_ : Union[str, Any] = do_resize A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 288} A_ : Tuple = size_divisor A_ : List[Any] = do_rescale A_ : Dict = rescale_factor A_ : List[Any] = do_normalize A_ : Dict = do_center_crop A_ : Optional[Any] = image_mean A_ : List[str] = image_std A_ : str = do_pad A_ : Any = batch_size A_ : List[str] = num_channels A_ : List[str] = min_resolution A_ : Union[str, Any] = max_resolution def UpperCAmelCase_ ( self ) -> Any: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]: if not batched: A_ : Union[str, Any] = self.size["""shortest_edge"""] A_ : Dict = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image ): A_ , A_ : Optional[Any] = image.size else: A_ , A_ : int = image.shape[1], image.shape[2] A_ : Optional[int] = size / min(_lowerCamelCase , _lowerCamelCase ) if h < w: A_ , A_ : Optional[Any] = size, scale * w else: A_ , A_ : Dict = scale * h, size A_ : Union[str, Any] = int((1333 / 800) * size ) if max(_lowerCamelCase , _lowerCamelCase ) > max_size: A_ : str = max_size / max(_lowerCamelCase , _lowerCamelCase ) A_ : Dict = newh * scale A_ : Dict = neww * scale A_ , A_ : str = int(newh + 0.5 ), int(neww + 0.5 ) A_ , A_ : Dict = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: A_ : Tuple = [] for image in image_inputs: A_ , A_ : Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0] A_ : Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ) -> Dict: A_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass def UpperCAmelCase_ ( self ) -> List[str]: # Initialize image processor A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: # Initialize image processor A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ) -> Tuple: # Initialize image processor A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
344
1
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase_ ( self ) -> Tuple: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase_ ( self ) -> List[str]: A_ : Optional[int] = ort.SessionOptions() A_ : List[str] = False return options def UpperCAmelCase_ ( self ) -> List[Any]: A_ : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) A_ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) A_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default A_ : List[str] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) A_ : Optional[Any] = """A red cat sitting on a park bench""" A_ : Optional[Any] = np.random.RandomState(0 ) A_ : Optional[int] = pipe( prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_lowerCamelCase , output_type="""np""" , ) A_ : Tuple = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
344
'''simple docstring''' def UpperCAmelCase ( a_ , a_ ) -> Optional[int]: """simple docstring""" print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" ) for i in range(a_ ): for j in range(a_ ): if dist[i][j] != float("""inf""" ): print(int(dist[i][j] ) , end="""\t""" ) else: print("""INF""" , end="""\t""" ) print() def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" A_ : List[str] = [[float("""inf""" ) for _ in range(a_ )] for _ in range(a_ )] for i in range(a_ ): for j in range(a_ ): A_ : List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(a_ ): # looping through rows of graph array for i in range(a_ ): # looping through columns of graph array for j in range(a_ ): if ( dist[i][k] != float("""inf""" ) and dist[k][j] != float("""inf""" ) and dist[i][k] + dist[k][j] < dist[i][j] ): A_ : List[str] = dist[i][k] + dist[k][j] _print_dist(a_ , a_ ) return dist, v if __name__ == "__main__": UpperCamelCase__ : Tuple = int(input('Enter number of vertices: ')) UpperCamelCase__ : int = int(input('Enter number of edges: ')) UpperCamelCase__ : Dict = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): UpperCamelCase__ : Union[str, Any] = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) UpperCamelCase__ : Union[str, Any] = int(input('Enter source:')) UpperCamelCase__ : int = int(input('Enter destination:')) UpperCamelCase__ : Optional[Any] = float(input('Enter weight:')) UpperCamelCase__ : Any = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
344
1
'''simple docstring''' from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. UpperCamelCase__ : Tuple = 10 def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" for i in range(a_ , a_ ): if array[i] == target: return i return -1 def UpperCAmelCase ( a_ , a_ ) -> int: """simple docstring""" A_ : Optional[Any] = 0 A_ : Optional[int] = len(a_ ) while left <= right: if right - left < precision: return lin_search(a_ , a_ , a_ , a_ ) A_ : str = (left + right) // 3 + 1 A_ : Tuple = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: A_ : Optional[int] = one_third - 1 elif array[two_third] < target: A_ : Optional[int] = two_third + 1 else: A_ : Union[str, Any] = one_third + 1 A_ : List[Any] = two_third - 1 else: return -1 def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int: """simple docstring""" if left < right: if right - left < precision: return lin_search(a_ , a_ , a_ , a_ ) A_ : Any = (left + right) // 3 + 1 A_ : Dict = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(a_ , one_third - 1 , a_ , a_ ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , a_ , a_ , a_ ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , a_ , a_ ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : Any = input('Enter numbers separated by comma:\n').strip() UpperCamelCase__ : Optional[Any] = [int(item.strip()) for item in user_input.split(',')] assert collection == sorted(collection), f"List must be ordered.\n{collection}." UpperCamelCase__ : Any = int(input('Enter the number to be found in the list:\n').strip()) UpperCamelCase__ : Optional[int] = ite_ternary_search(collection, target) UpperCamelCase__ : Any = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f'Iterative search: {target} found at positions: {resulta}') print(f'Recursive search: {target} found at positions: {resulta}') else: print('Not found')
344
'''simple docstring''' import datasets from .evaluate import evaluate UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n' UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n' UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def UpperCAmelCase_ ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )}, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} A_ : List[Any] = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase ) return score
344
1
'''simple docstring''' def UpperCAmelCase ( a_ , a_ ) -> Optional[int]: """simple docstring""" print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" ) for i in range(a_ ): for j in range(a_ ): if dist[i][j] != float("""inf""" ): print(int(dist[i][j] ) , end="""\t""" ) else: print("""INF""" , end="""\t""" ) print() def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" A_ : List[str] = [[float("""inf""" ) for _ in range(a_ )] for _ in range(a_ )] for i in range(a_ ): for j in range(a_ ): A_ : List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(a_ ): # looping through rows of graph array for i in range(a_ ): # looping through columns of graph array for j in range(a_ ): if ( dist[i][k] != float("""inf""" ) and dist[k][j] != float("""inf""" ) and dist[i][k] + dist[k][j] < dist[i][j] ): A_ : List[str] = dist[i][k] + dist[k][j] _print_dist(a_ , a_ ) return dist, v if __name__ == "__main__": UpperCamelCase__ : Tuple = int(input('Enter number of vertices: ')) UpperCamelCase__ : int = int(input('Enter number of edges: ')) UpperCamelCase__ : Dict = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): UpperCamelCase__ : Union[str, Any] = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) UpperCamelCase__ : Union[str, Any] = int(input('Enter source:')) UpperCamelCase__ : int = int(input('Enter destination:')) UpperCamelCase__ : Optional[Any] = float(input('Enter weight:')) UpperCamelCase__ : Any = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
344
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase__ : Any = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[Any] = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] UpperCamelCase__ : List[str] = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] UpperCamelCase__ : str = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): UpperCamelCase__ : List[str] = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
1
'''simple docstring''' from math import sqrt def UpperCAmelCase ( a_ = 1_0_0_0_0_0_0 ) -> int: """simple docstring""" A_ : int = 0 A_ : int = 0 A_ : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(a_ , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'{solution() = }')
344
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''vision-encoder-decoder''' lowerCamelCase = True def __init__( self , **_lowerCamelCase ) -> str: super().__init__(**_lowerCamelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F"A configuraton of type {self.model_type} cannot be instantiated because " F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" ) A_ : Optional[int] = kwargs.pop("""encoder""" ) A_ : List[str] = encoder_config.pop("""model_type""" ) A_ : str = kwargs.pop("""decoder""" ) A_ : Optional[Any] = decoder_config.pop("""model_type""" ) A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase ) A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase ) A_ : Any = True @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig: logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) A_ : int = True A_ : List[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Any: A_ : Dict = copy.deepcopy(self.__dict__ ) A_ : List[str] = self.encoder.to_dict() A_ : Union[str, Any] = self.decoder.to_dict() A_ : str = self.__class__.model_type return output class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = version.parse('''1.11''' ) @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase_ ( self ) -> float: return 1e-4 @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class _lowerCAmelCase ( __A ): """simple docstring""" @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: A_ : Optional[Any] = OrderedDict() A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""} A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""} A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]: import torch A_ : Optional[int] = OrderedDict() A_ : List[Any] = super().generate_dummy_inputs( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase ) A_ , A_ : str = dummy_input["""input_ids"""].shape A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size) A_ : Union[str, Any] = dummy_input.pop("""input_ids""" ) A_ : List[str] = dummy_input.pop("""attention_mask""" ) A_ : Optional[int] = torch.zeros(_lowerCamelCase ) return common_inputs class _lowerCAmelCase ( __A ): """simple docstring""" @property def UpperCAmelCase_ ( self ) -> None: pass def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig: return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig: A_ : List[Any] = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
344
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ) -> Dict: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p A_ : Optional[int] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} A_ : str = parent A_ : Optional[int] = batch_size A_ : int = num_channels A_ : Dict = min_resolution A_ : Union[str, Any] = max_resolution A_ : Optional[Any] = do_resize A_ : str = size A_ : Tuple = do_normalize A_ : Union[str, Any] = image_mean A_ : List[Any] = image_std A_ : str = do_rescale A_ : Union[str, Any] = rescale_factor A_ : Union[str, Any] = do_pad def UpperCAmelCase_ ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Any: if not batched: A_ : List[str] = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image ): A_ , A_ : Optional[int] = image.size else: A_ , A_ : Union[str, Any] = image.shape[1], image.shape[2] if w < h: A_ : Tuple = int(self.size["""shortest_edge"""] * h / w ) A_ : Dict = self.size["""shortest_edge"""] elif w > h: A_ : Tuple = self.size["""shortest_edge"""] A_ : Optional[Any] = int(self.size["""shortest_edge"""] * w / h ) else: A_ : Any = self.size["""shortest_edge"""] A_ : Optional[int] = self.size["""shortest_edge"""] else: A_ : List[str] = [] for image in image_inputs: A_ , A_ : List[str] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : Union[str, Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0] A_ : Dict = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = DetaImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ) -> Dict: A_ : Optional[int] = DetaImageProcessingTester(self ) @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ) -> Tuple: A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_pad""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size""" ) ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: pass def UpperCAmelCase_ ( self ) -> Optional[int]: # Initialize image_processing A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ , A_ : Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ) -> int: # Initialize image_processing A_ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : Any = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ) -> str: # Initialize image_processing A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Optional[int] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase_ ( self ) -> Tuple: # prepare image and target A_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: A_ : List[Any] = json.loads(f.read() ) A_ : int = {"""image_id""": 3_9769, """annotations""": target} # encode them A_ : List[str] = DetaImageProcessor() A_ : Optional[int] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors="""pt""" ) # verify pixel values A_ : Optional[int] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase ) A_ : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) ) # verify area A_ : Optional[int] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase ) ) # verify boxes A_ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase ) A_ : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3 ) ) # verify image_id A_ : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase ) ) # verify is_crowd A_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase ) ) # verify class_labels A_ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase ) ) # verify orig_size A_ : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase ) ) # verify size A_ : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase ) ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: # prepare image, target and masks_path A_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: A_ : Any = json.loads(f.read() ) A_ : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} A_ : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them A_ : List[str] = DetaImageProcessor(format="""coco_panoptic""" ) A_ : Union[str, Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors="""pt""" ) # verify pixel values A_ : int = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase ) A_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) ) # verify area A_ : List[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase ) ) # verify boxes A_ : int = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase ) A_ : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3 ) ) # verify image_id A_ : str = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase ) ) # verify is_crowd A_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase ) ) # verify class_labels A_ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase ) ) # verify masks A_ : Dict = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCamelCase ) # verify orig_size A_ : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase ) ) # verify size A_ : Optional[int] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase ) )
344
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.' UpperCamelCase__ : List[Any] = 'file' @pytest.fixture(scope="""session""" ) def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") A_ : int = bytes(a_ , """utf-8""" ) with zstd.open(a_ , """wb""" ) as f: f.write(a_ ) return path @pytest.fixture def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f: f.write(a_ ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]: """simple docstring""" A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} A_ : Any = input_paths[compression_format] A_ : Tuple = tmp_path / """cache""" A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ ) A_ : Dict = cached_path(a_ , download_config=a_ ) with open(a_ ) as f: A_ : Optional[Any] = f.read() with open(a_ ) as f: A_ : List[str] = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str: """simple docstring""" A_ : Union[str, Any] = """custom_cache""" A_ : List[str] = """custom_extracted_dir""" A_ : Optional[Any] = tmp_path / """custom_extracted_path""" if default_extracted: A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) ) A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) A_ : List[Any] = xz_file A_ : Optional[int] = ( DownloadConfig(extract_compressed_file=a_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ ) ) A_ : Union[str, Any] = cached_path(a_ , download_config=a_ ) assert Path(a_ ).parent.parts[-2:] == expected def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" A_ : str = str(Path(a_ ).resolve() ) assert cached_path(a_ ) == text_file # relative path A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(a_ ) == text_file def UpperCAmelCase ( a_ ) -> int: """simple docstring""" A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" ) with pytest.raises(a_ ): cached_path(a_ ) # relative path A_ : Tuple = """./__missing_file__.txt""" with pytest.raises(a_ ): cached_path(a_ ) def UpperCAmelCase ( a_ ) -> Tuple: """simple docstring""" A_ : Any = get_from_cache(F"tmp://{tmpfs_file}" ) with open(a_ ) as f: A_ : List[str] = f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ ) def UpperCAmelCase ( ) -> List[str]: """simple docstring""" with pytest.raises(a_ ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ ) def UpperCAmelCase ( a_ ) -> Union[str, Any]: """simple docstring""" A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(a_ ): http_get("""https://huggingface.co""" , temp_file=a_ ) with pytest.raises(a_ ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ ) def UpperCAmelCase ( a_ ) -> int: """simple docstring""" A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(a_ ): ftp_get("""ftp://huggingface.co""" , temp_file=a_ ) with pytest.raises(a_ ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ ) def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(a_ ): fsspec_get("""s3://huggingface.co""" , temp_file=a_ ) with pytest.raises(a_ ): fsspec_head("""s3://huggingface.co""" )
344
1
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class _lowerCAmelCase : """simple docstring""" lowerCamelCase = 42 # setable values lowerCamelCase = 42 lowerCamelCase = 42 lowerCamelCase = None @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: return cls(common=_lowerCamelCase , init_noise_sigma=_lowerCamelCase , timesteps=_lowerCamelCase ) @dataclass class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = 42 class _lowerCAmelCase ( __A, __A ): """simple docstring""" lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers] lowerCamelCase = 42 @property def UpperCAmelCase_ ( self ) -> Optional[int]: return True @register_to_config def __init__( self , _lowerCamelCase = 1000 , _lowerCamelCase = 0.0001 , _lowerCamelCase = 0.02 , _lowerCamelCase = "linear" , _lowerCamelCase = None , _lowerCamelCase = "fixed_small" , _lowerCamelCase = True , _lowerCamelCase = "epsilon" , _lowerCamelCase = jnp.floataa , ) -> Tuple: A_ : Tuple = dtype def UpperCAmelCase_ ( self , _lowerCamelCase = None ) -> DDPMSchedulerState: if common is None: A_ : str = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution A_ : Any = jnp.array(1.0 , dtype=self.dtype ) A_ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=_lowerCamelCase , init_noise_sigma=_lowerCamelCase , timesteps=_lowerCamelCase , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None ) -> jnp.ndarray: return sample def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = () ) -> DDPMSchedulerState: A_ : Tuple = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 A_ : Tuple = (jnp.arange(0 , _lowerCamelCase ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ) -> Any: A_ : Union[str, Any] = state.common.alphas_cumprod[t] A_ : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A_ : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: A_ : Optional[Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": A_ : str = jnp.clip(_lowerCamelCase , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": A_ : Tuple = jnp.log(jnp.clip(_lowerCamelCase , a_min=1e-20 ) ) elif variance_type == "fixed_large": A_ : Union[str, Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log A_ : int = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": A_ : Any = variance A_ : Union[str, Any] = state.common.betas[t] A_ : Optional[Any] = (predicted_variance + 1) / 2 A_ : int = frac * max_log + (1 - frac) * min_log return variance def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]: A_ : Optional[int] = timestep if key is None: A_ : List[str] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: A_ , A_ : List[Any] = jnp.split(_lowerCamelCase , sample.shape[1] , axis=1 ) else: A_ : Union[str, Any] = None # 1. compute alphas, betas A_ : Optional[Any] = state.common.alphas_cumprod[t] A_ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) A_ : str = 1 - alpha_prod_t A_ : Dict = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A_ : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A_ : str = model_output elif self.config.prediction_type == "v_prediction": A_ : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` " """ for the FlaxDDPMScheduler.""" ) # 3. Clip "predicted x_0" if self.config.clip_sample: A_ : Optional[Any] = jnp.clip(_lowerCamelCase , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t A_ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise def random_variance(): A_ : Dict = jax.random.split(_lowerCamelCase , num=1 ) A_ : int = jax.random.normal(_lowerCamelCase , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(_lowerCamelCase , _lowerCamelCase , predicted_variance=_lowerCamelCase ) ** 0.5) * noise A_ : str = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) A_ : Dict = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=_lowerCamelCase , state=_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> jnp.ndarray: return add_noise_common(state.common , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> jnp.ndarray: return get_velocity_common(state.common , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def __len__( self ) -> str: return self.config.num_train_timesteps
344
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase__ : int = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Any = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[str] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : str = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json' ), 'distilbert-base-uncased-finetuned-sst-2-english': ( 'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json' ), } class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = '''distilbert''' lowerCamelCase = { '''hidden_size''': '''dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', } def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]: A_ : Tuple = vocab_size A_ : List[Any] = max_position_embeddings A_ : int = sinusoidal_pos_embds A_ : int = n_layers A_ : str = n_heads A_ : Optional[int] = dim A_ : int = hidden_dim A_ : Tuple = dropout A_ : List[Any] = attention_dropout A_ : int = activation A_ : Dict = initializer_range A_ : List[Any] = qa_dropout A_ : int = seq_classif_dropout super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase ) class _lowerCAmelCase ( __A ): """simple docstring""" @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A_ : int = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
344
1
'''simple docstring''' import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node UpperCamelCase__ : Dict = 4 UpperCamelCase__ : str = 3 class _lowerCAmelCase ( __A ): """simple docstring""" pass def UpperCAmelCase ( a_ ) -> Optional[int]: """simple docstring""" for shard in shards: for i in range(a_ ): yield {"i": i, "shard": shard} def UpperCAmelCase ( ) -> Dict: """simple docstring""" A_ : Optional[int] = int(os.environ["""RANK"""] ) A_ : List[Any] = int(os.environ["""WORLD_SIZE"""] ) A_ : Tuple = ArgumentParser() parser.add_argument("""--streaming""" , type=a_ ) parser.add_argument("""--local_rank""" , type=a_ ) parser.add_argument("""--num_workers""" , type=a_ , default=0 ) A_ : Optional[Any] = parser.parse_args() A_ : List[Any] = args.streaming A_ : Dict = args.num_workers A_ : int = {"""shards""": [F"shard_{shard_idx}" for shard_idx in range(a_ )]} A_ : Dict = IterableDataset.from_generator(a_ , gen_kwargs=a_ ) if not streaming: A_ : Optional[int] = Dataset.from_list(list(a_ ) ) A_ : Tuple = split_dataset_by_node(a_ , rank=a_ , world_size=a_ ) A_ : List[Any] = torch.utils.data.DataLoader(a_ , num_workers=a_ ) A_ : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD A_ : Dict = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) A_ : int = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" ) if __name__ == "__main__": main()
344
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : int = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) UpperCamelCase__ : Any = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]: """simple docstring""" A_ : int = state_dict.pop(a_ ) A_ : Tuple = val def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" A_ : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) A_ : str = value else: A_ : int = value return new_state_dict def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]: """simple docstring""" A_ : List[Any] = """""" if is_panoptic: A_ : Any = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict A_ : Optional[Any] = in_proj_weight[:2_5_6, :] A_ : Tuple = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : int = in_proj_bias[2_5_6:5_1_2] A_ : int = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] def UpperCAmelCase ( ) -> Dict: """simple docstring""" A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw ) return im @torch.no_grad() def UpperCAmelCase ( a_ , a_ ) -> Dict: """simple docstring""" A_ : int = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: A_ : str = """resnet101""" if "dc5" in model_name: A_ : List[Any] = True A_ : str = """panoptic""" in model_name if is_panoptic: A_ : Dict = 2_5_0 else: A_ : Union[str, Any] = 9_1 A_ : str = """huggingface/label-files""" A_ : Union[str, Any] = """coco-detection-id2label.json""" A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) ) A_ : str = {int(a_ ): v for k, v in idalabel.items()} A_ : Optional[int] = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} # load image processor A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection""" A_ : Any = ConditionalDetrImageProcessor(format=a_ ) # prepare image A_ : Tuple = prepare_img() A_ : Any = image_processor(images=a_ , return_tensors="""pt""" ) A_ : Optional[int] = encoding["""pixel_values"""] logger.info(F"Converting model {model_name}..." ) # load original model from torch hub A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval() A_ : List[Any] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: A_ : Union[str, Any] = """conditional_detr.""" + src rename_key(a_ , a_ , a_ ) A_ : Any = rename_backbone_keys(a_ ) # query, key and value matrices need special treatment read_in_q_k_v(a_ , is_panoptic=a_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): A_ : Dict = state_dict.pop(a_ ) A_ : List[Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: A_ : str = state_dict.pop(a_ ) A_ : Any = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: A_ : Optional[int] = state_dict.pop(a_ ) A_ : str = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): A_ : Tuple = state_dict.pop(a_ ) A_ : Dict = val # finally, create HuggingFace model and load state dict A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ ) model.load_state_dict(a_ ) model.eval() model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion A_ : str = conditional_detr(a_ ) A_ : str = model(a_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(a_ ).mkdir(exist_ok=a_ ) model.save_pretrained(a_ ) image_processor.save_pretrained(a_ ) if __name__ == "__main__": UpperCamelCase__ : int = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) UpperCamelCase__ : Optional[Any] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
344
1
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ) -> Tuple: A_ : List[Any] = get_activation("""swish""" ) self.assertIsInstance(_lowerCamelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase_ ( self ) -> List[Any]: A_ : Dict = get_activation("""silu""" ) self.assertIsInstance(_lowerCamelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Union[str, Any] = get_activation("""mish""" ) self.assertIsInstance(_lowerCamelCase , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : List[Any] = get_activation("""gelu""" ) self.assertIsInstance(_lowerCamelCase , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
344
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = (UnCLIPScheduler,) def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]: A_ : Union[str, Any] = { """num_train_timesteps""": 1000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**_lowerCamelCase ) return config def UpperCAmelCase_ ( self ) -> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> str: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> str: for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: A_ : Optional[int] = self.scheduler_classes[0] A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" ) A_ : List[Any] = scheduler_class(**_lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5 def UpperCAmelCase_ ( self ) -> Optional[int]: A_ : List[Any] = self.scheduler_classes[0] A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" ) A_ : Dict = scheduler_class(**_lowerCamelCase ) A_ : Dict = 0.5 assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5 def UpperCAmelCase_ ( self ) -> Any: A_ : Optional[Any] = self.scheduler_classes[0] A_ : Tuple = self.get_scheduler_config() A_ : Optional[Any] = scheduler_class(**_lowerCamelCase ) A_ : int = scheduler.timesteps A_ : List[Any] = self.dummy_model() A_ : str = self.dummy_sample_deter A_ : Optional[Any] = torch.manual_seed(0 ) for i, t in enumerate(_lowerCamelCase ): # 1. predict noise residual A_ : Any = model(_lowerCamelCase , _lowerCamelCase ) # 2. predict previous mean of sample x_t-1 A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample A_ : List[Any] = pred_prev_sample A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1e-2 assert abs(result_mean.item() - 0.328_4743 ) < 1e-3 def UpperCAmelCase_ ( self ) -> Dict: A_ : Union[str, Any] = self.scheduler_classes[0] A_ : Dict = self.get_scheduler_config() A_ : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(25 ) A_ : List[str] = scheduler.timesteps A_ : List[Any] = self.dummy_model() A_ : List[Any] = self.dummy_sample_deter A_ : List[Any] = torch.manual_seed(0 ) for i, t in enumerate(_lowerCamelCase ): # 1. predict noise residual A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase ) if i + 1 == timesteps.shape[0]: A_ : List[str] = None else: A_ : Dict = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 A_ : str = scheduler.step( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample A_ : Optional[Any] = pred_prev_sample A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1e-2 assert abs(result_mean.item() - 0.336_2038 ) < 1e-3 def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass def UpperCAmelCase_ ( self ) -> int: pass
344
1
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCamelCase__ : Dict = { 'b0': { 'hidden_dim': 1_280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1_280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1_408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1_536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1_792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2_048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2_304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2_560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def UpperCAmelCase ( a_ ) -> int: """simple docstring""" A_ : Tuple = EfficientNetConfig() A_ : int = CONFIG_MAP[model_name]["""hidden_dim"""] A_ : Optional[int] = CONFIG_MAP[model_name]["""width_coef"""] A_ : Union[str, Any] = CONFIG_MAP[model_name]["""depth_coef"""] A_ : int = CONFIG_MAP[model_name]["""image_size"""] A_ : str = CONFIG_MAP[model_name]["""dropout_rate"""] A_ : Optional[Any] = CONFIG_MAP[model_name]["""dw_padding"""] A_ : List[Any] = """huggingface/label-files""" A_ : List[str] = """imagenet-1k-id2label.json""" A_ : Optional[Any] = 1_0_0_0 A_ : Tuple = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) ) A_ : str = {int(a_ ): v for k, v in idalabel.items()} A_ : Dict = idalabel A_ : str = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase ( ) -> Dict: """simple docstring""" A_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Any = Image.open(requests.get(a_ , stream=a_ ).raw ) return im def UpperCAmelCase ( a_ ) -> Optional[Any]: """simple docstring""" A_ : Any = CONFIG_MAP[model_name]["""image_size"""] A_ : Optional[int] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=a_ , ) return preprocessor def UpperCAmelCase ( a_ ) -> Optional[Any]: """simple docstring""" A_ : str = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] A_ : List[str] = sorted(set(a_ ) ) A_ : Optional[int] = len(a_ ) A_ : str = {b: str(a_ ) for b, i in zip(a_ , range(a_ ) )} A_ : List[Any] = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: A_ : List[str] = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) A_ : Any = {} for item in rename_keys: if item[0] in original_param_names: A_ : Union[str, Any] = """efficientnet.""" + item[1] A_ : List[str] = """classifier.weight""" A_ : int = """classifier.bias""" return key_mapping def UpperCAmelCase ( a_ , a_ , a_ ) -> Union[str, Any]: """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue A_ : Optional[int] = key_mapping[key] if "_conv" in key and "kernel" in key: A_ : List[str] = torch.from_numpy(a_ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: A_ : Tuple = torch.from_numpy(a_ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: A_ : Dict = torch.from_numpy(np.transpose(a_ ) ) else: A_ : List[Any] = torch.from_numpy(a_ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(a_ ) @torch.no_grad() def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> Optional[Any]: """simple docstring""" A_ : Optional[int] = model_classes[model_name]( include_top=a_ , weights="""imagenet""" , input_tensor=a_ , input_shape=a_ , pooling=a_ , classes=1_0_0_0 , classifier_activation="""softmax""" , ) A_ : List[str] = original_model.trainable_variables A_ : Any = original_model.non_trainable_variables A_ : Any = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: A_ : Dict = param.numpy() A_ : str = list(tf_params.keys() ) # Load HuggingFace model A_ : Union[str, Any] = get_efficientnet_config(a_ ) A_ : Tuple = EfficientNetForImageClassification(a_ ).eval() A_ : List[str] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) A_ : Optional[int] = rename_keys(a_ ) replace_params(a_ , a_ , a_ ) # Initialize preprocessor and preprocess input image A_ : Optional[Any] = convert_image_processor(a_ ) A_ : Any = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): A_ : Optional[Any] = hf_model(**a_ ) A_ : Any = outputs.logits.detach().numpy() # Original model inference A_ : Dict = False A_ : str = CONFIG_MAP[model_name]["""image_size"""] A_ : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) A_ : Optional[int] = image.img_to_array(a_ ) A_ : Union[str, Any] = np.expand_dims(a_ , axis=0 ) A_ : Dict = original_model.predict(a_ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(a_ , a_ , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(a_ ): os.mkdir(a_ ) # Save converted model and image processor hf_model.save_pretrained(a_ ) preprocessor.save_pretrained(a_ ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." ) A_ : List[str] = F"efficientnet-{model_name}" preprocessor.push_to_hub(a_ ) hf_model.push_to_hub(a_ ) if __name__ == "__main__": UpperCamelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCamelCase__ : Tuple = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
344
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=False , ) -> Optional[int]: A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20} A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} A_ : Optional[Any] = parent A_ : Optional[int] = batch_size A_ : Union[str, Any] = num_channels A_ : str = image_size A_ : Tuple = min_resolution A_ : Dict = max_resolution A_ : str = do_resize A_ : Tuple = size A_ : int = do_center_crop A_ : Dict = crop_size A_ : Tuple = do_normalize A_ : List[str] = image_mean A_ : Optional[Any] = image_std A_ : Any = do_reduce_labels def UpperCAmelCase_ ( self ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def UpperCAmelCase ( ) -> List[str]: """simple docstring""" A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" ) A_ : Tuple = Image.open(dataset[0]["""file"""] ) A_ : Dict = Image.open(dataset[1]["""file"""] ) return image, map def UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" ) A_ : Tuple = Image.open(ds[0]["""file"""] ) A_ : List[Any] = Image.open(ds[1]["""file"""] ) A_ : Any = Image.open(ds[2]["""file"""] ) A_ : str = Image.open(ds[3]["""file"""] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = BeitImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ) -> Dict: A_ : List[Any] = BeitImageProcessingTester(self ) @property def UpperCAmelCase_ ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ) -> Optional[int]: A_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase ) A_ : int = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass def UpperCAmelCase_ ( self ) -> Dict: # Initialize image_processing A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ) -> List[str]: # Initialize image_processing A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ) -> str: # Initialize image_processing A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ) -> Optional[int]: # Initialize image_processing A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) A_ : Optional[int] = [] for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( 1, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) # Test batched A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) # Test not batched input (PIL images) A_ , A_ : List[Any] = prepare_semantic_single_inputs() A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( 1, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) # Test batched input (PIL images) A_ , A_ : str = prepare_semantic_batch_inputs() A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertEqual( encoding["""pixel_values"""].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual( encoding["""labels"""].shape , ( 2, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) self.assertEqual(encoding["""labels"""].dtype , torch.long ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 ) def UpperCAmelCase_ ( self ) -> Tuple: # Initialize image_processing A_ : Any = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 A_ , A_ : Tuple = prepare_semantic_single_inputs() A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 150 ) A_ : str = True A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" ) self.assertTrue(encoding["""labels"""].min().item() >= 0 ) self.assertTrue(encoding["""labels"""].max().item() <= 255 )
344
1
'''simple docstring''' import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCamelCase__ : str = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n' class _lowerCAmelCase ( __A ): """simple docstring""" @add_start_docstrings(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool: raise NotImplementedError("""StoppingCriteria needs to be subclassed""" ) class _lowerCAmelCase ( __A ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase = None ) -> str: A_ : Optional[int] = max_length A_ : int = max_position_embeddings @add_start_docstrings(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool: A_ : Any = input_ids.shape[-1] A_ : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( """This is a friendly reminder - the current text generation call will exceed the model's predefined """ F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe " """exceptions, performance degradation, or nothing at all.""" ) return is_done class _lowerCAmelCase ( __A ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> int: warnings.warn( """The class `MaxNewTokensCriteria` is deprecated. """ F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` " """with `max_length = start_length + max_new_tokens` instead.""" , _lowerCamelCase , ) A_ : List[str] = start_length A_ : List[Any] = max_new_tokens A_ : Union[str, Any] = start_length + max_new_tokens @add_start_docstrings(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool: return input_ids.shape[-1] >= self.max_length class _lowerCAmelCase ( __A ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase = None ) -> str: A_ : Any = max_time A_ : str = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool: return time.time() - self.initial_timestamp > self.max_time class _lowerCAmelCase ( __A ): """simple docstring""" @add_start_docstrings(_lowerCamelCase ) def __call__( self , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> bool: return any(criteria(_lowerCamelCase , _lowerCamelCase ) for criteria in self ) @property def UpperCAmelCase_ ( self ) -> Optional[int]: for stopping_criterium in self: if isinstance(_lowerCamelCase , _lowerCamelCase ): return stopping_criterium.max_length elif isinstance(_lowerCamelCase , _lowerCamelCase ): return stopping_criterium.max_length return None def UpperCAmelCase ( a_ , a_ ) -> StoppingCriteriaList: """simple docstring""" A_ : Any = stopping_criteria.max_length A_ : Optional[Any] = deepcopy(a_ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , a_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=a_ ) ) return new_stopping_criteria
344
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class _lowerCAmelCase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str: super().__init__() A_ : Optional[Any] = pad_token_id A_ : List[Any] = max_length A_ : str = vocab A_ : Union[str, Any] = merges A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase ) @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int: A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()] A_ : Dict = tokenizer.get_vocab() return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str: A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) @classmethod def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]: return cls(**_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any: A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase ) A_ : Any = tf.ones_like(_lowerCamelCase ) if self.pad_token_id is not None: # pad the tokens up to max length A_ : List[Any] = max_length if max_length is not None else self.max_length if max_length is not None: A_ , A_ : Tuple = pad_model_inputs( _lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
344
1
'''simple docstring''' import os from pathlib import Path def UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" from torch.utils.cpp_extension import load A_ : Union[str, Any] = Path(a_ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" A_ : Dict = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , a_ , with_cuda=a_ , extra_include_paths=[str(a_ )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
344
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : int = ['YolosFeatureExtractor'] UpperCamelCase__ : int = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
1
'''simple docstring''' from __future__ import annotations import pandas as pd def UpperCAmelCase ( a_ , a_ , a_ ) -> list[int]: """simple docstring""" A_ : Dict = [0] * no_of_processes A_ : List[str] = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(a_ ): A_ : Union[str, Any] = burst_time[i] A_ : Dict = 0 A_ : Optional[Any] = 0 A_ : Any = 9_9_9_9_9_9_9_9_9 A_ : Any = 0 A_ : List[str] = False # Process until all processes are completed while complete != no_of_processes: for j in range(a_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: A_ : Any = remaining_time[j] A_ : Optional[Any] = j A_ : Optional[int] = True if not check: increment_time += 1 continue remaining_time[short] -= 1 A_ : Optional[Any] = remaining_time[short] if minm == 0: A_ : Tuple = 9_9_9_9_9_9_9_9_9 if remaining_time[short] == 0: complete += 1 A_ : Optional[Any] = False # Find finish time of current process A_ : Optional[int] = increment_time + 1 # Calculate waiting time A_ : Optional[Any] = finish_time - arrival_time[short] A_ : Union[str, Any] = finar - burst_time[short] if waiting_time[short] < 0: A_ : List[str] = 0 # Increment time increment_time += 1 return waiting_time def UpperCAmelCase ( a_ , a_ , a_ ) -> list[int]: """simple docstring""" A_ : Optional[Any] = [0] * no_of_processes for i in range(a_ ): A_ : str = burst_time[i] + waiting_time[i] return turn_around_time def UpperCAmelCase ( a_ , a_ , a_ ) -> None: """simple docstring""" A_ : Optional[int] = 0 A_ : List[Any] = 0 for i in range(a_ ): A_ : List[Any] = total_waiting_time + waiting_time[i] A_ : Any = total_turn_around_time + turn_around_time[i] print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}" ) print("""Average turn around time =""" , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print('Enter how many process you want to analyze') UpperCamelCase__ : List[str] = int(input()) UpperCamelCase__ : Optional[Any] = [0] * no_of_processes UpperCamelCase__ : int = [0] * no_of_processes UpperCamelCase__ : Any = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print('Enter the arrival time and burst time for process:--' + str(i + 1)) UpperCamelCase__ , UpperCamelCase__ : str = map(int, input().split()) UpperCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes) UpperCamelCase__ : List[Any] = burst_time UpperCamelCase__ : List[str] = no_of_processes UpperCamelCase__ : Any = waiting_time UpperCamelCase__ : List[str] = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) UpperCamelCase__ : Optional[int] = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ 'Process', 'BurstTime', 'ArrivalTime', 'WaitingTime', 'TurnAroundTime', ], ) # Printing the dataFrame pd.set_option('display.max_rows', fcfs.shape[0] + 1) print(fcfs)
344
'''simple docstring''' class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: A_ : Optional[Any] = name A_ : Dict = value A_ : Union[str, Any] = weight def __repr__( self ) -> List[str]: return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})" def UpperCAmelCase_ ( self ) -> Optional[Any]: return self.value def UpperCAmelCase_ ( self ) -> List[str]: return self.name def UpperCAmelCase_ ( self ) -> Tuple: return self.weight def UpperCAmelCase_ ( self ) -> Optional[int]: return self.value / self.weight def UpperCAmelCase ( a_ , a_ , a_ ) -> str: """simple docstring""" A_ : Optional[int] = [] for i in range(len(a_ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]: """simple docstring""" A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ ) A_ : str = [] A_ , A_ : Dict = 0.0, 0.0 for i in range(len(a_ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def UpperCAmelCase ( ) -> Tuple: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
344
1
'''simple docstring''' from __future__ import annotations from random import choice def UpperCAmelCase ( a_ ) -> List[str]: """simple docstring""" return choice(a_ ) def UpperCAmelCase ( a_ , a_ ) -> int: """simple docstring""" A_ : int = random_pivot(a_ ) # partition based on pivot # linear time A_ : str = [e for e in lst if e < pivot] A_ : Dict = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(a_ ) == k - 1: return pivot # pivot is in elements bigger than k elif len(a_ ) < k - 1: return kth_number(a_ , k - len(a_ ) - 1 ) # pivot is in elements smaller than k else: return kth_number(a_ , a_ ) if __name__ == "__main__": import doctest doctest.testmod()
344
'''simple docstring''' from __future__ import annotations from math import pi, sqrt def UpperCAmelCase ( a_ , a_ ) -> tuple: """simple docstring""" if inductance <= 0: raise ValueError("""Inductance cannot be 0 or negative""" ) elif capacitance <= 0: raise ValueError("""Capacitance cannot be 0 or negative""" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
344
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : str = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[str] = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class _lowerCAmelCase ( __A ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any: A_ : List[Any] = parent A_ : int = config_class A_ : int = has_text_modality A_ : str = kwargs A_ : int = common_properties def UpperCAmelCase_ ( self ) -> str: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : Optional[int] = ( ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["""vocab_size"""] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" ) # Test that config has the common properties as setter for idx, name in enumerate(_lowerCamelCase ): try: setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_lowerCamelCase ): try: A_ : List[str] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def UpperCAmelCase_ ( self ) -> Tuple: A_ : Any = self.config_class(**self.inputs_dict ) A_ : Optional[int] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : str = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" ) config_first.to_json_file(_lowerCamelCase ) A_ : Dict = self.config_class.from_json_file(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : Any = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_lowerCamelCase ) A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Optional[int] = self.config_class(**self.inputs_dict ) A_ : List[Any] = """test""" with tempfile.TemporaryDirectory() as tmpdirname: A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase ) config_first.save_pretrained(_lowerCamelCase ) A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) A_ : str = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_class.is_composition: return A_ : Dict = self.config_class() self.parent.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: A_ : Any = copy.deepcopy(_lowerCamelCase ) A_ : Tuple = self.config_class(**_lowerCamelCase ) A_ : Optional[Any] = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) ) elif getattr(_lowerCamelCase , _lowerCamelCase ) != value: wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) ) if len(_lowerCamelCase ) > 0: A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] ) raise ValueError(F"The following keys were not properly set in the config:\n{errors}" ) def UpperCAmelCase_ ( self ) -> Optional[int]: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
344
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowerCamelCase , ) -> Dict: A_ : Optional[Any] = parent A_ : List[Any] = 13 A_ : List[Any] = 7 A_ : Dict = True A_ : List[str] = True A_ : Dict = True A_ : List[str] = 99 A_ : Union[str, Any] = 32 A_ : List[Any] = 2 A_ : Optional[int] = 4 A_ : Any = 37 A_ : int = """gelu""" A_ : Dict = 0.1 A_ : int = 0.1 A_ : str = 512 A_ : Tuple = 16 A_ : Tuple = 2 A_ : Dict = 0.02 A_ : Dict = 3 A_ : Dict = 4 A_ : Optional[Any] = None def UpperCAmelCase_ ( self ) -> Tuple: A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : List[str] = None if self.use_input_mask: A_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Any = None A_ : int = None A_ : Any = None if self.use_labels: A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) A_ : Optional[int] = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self ) -> List[str]: ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Tuple = self.prepare_config_and_inputs() A_ : Union[str, Any] = True A_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: A_ : Any = TFEsmModel(config=_lowerCamelCase ) A_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} A_ : Dict = model(_lowerCamelCase ) A_ : Optional[Any] = [input_ids, input_mask] A_ : Tuple = model(_lowerCamelCase ) A_ : Optional[Any] = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> str: A_ : str = True A_ : Dict = TFEsmModel(config=_lowerCamelCase ) A_ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, """encoder_hidden_states""": encoder_hidden_states, """encoder_attention_mask""": encoder_attention_mask, } A_ : Union[str, Any] = model(_lowerCamelCase ) A_ : Union[str, Any] = [input_ids, input_mask] A_ : Optional[int] = model(_lowerCamelCase , encoder_hidden_states=_lowerCamelCase ) # Also check the case where encoder outputs are not passed A_ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: A_ : Dict = TFEsmForMaskedLM(config=_lowerCamelCase ) A_ : Optional[Any] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: A_ : str = self.num_labels A_ : Optional[Any] = TFEsmForTokenClassification(config=_lowerCamelCase ) A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} A_ : Optional[int] = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self ) -> int: A_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = config_and_inputs A_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _lowerCAmelCase ( __A, __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) lowerCamelCase = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : List[str] = TFEsmModelTester(self ) A_ : str = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def UpperCAmelCase_ ( self ) -> Any: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> int: A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) @slow def UpperCAmelCase_ ( self ) -> Dict: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[int] = TFEsmModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @unittest.skip("""Protein models do not support embedding resizing.""" ) def UpperCAmelCase_ ( self ) -> Dict: pass @unittest.skip("""Protein models do not support embedding resizing.""" ) def UpperCAmelCase_ ( self ) -> int: pass def UpperCAmelCase_ ( self ) -> List[Any]: A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : int = model_class(_lowerCamelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A_ : Any = model.get_bias() assert isinstance(_lowerCamelCase , _lowerCamelCase ) for k, v in name.items(): assert isinstance(_lowerCamelCase , tf.Variable ) else: A_ : Any = model.get_output_embeddings() assert x is None A_ : int = model.get_bias() assert name is None @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ) -> Any: A_ : Tuple = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) A_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ : str = model(_lowerCamelCase )[0] A_ : Optional[int] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , _lowerCamelCase ) # compare the actual values for a slice. A_ : int = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def UpperCAmelCase_ ( self ) -> str: A_ : List[Any] = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) A_ : List[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A_ : Optional[Any] = model(_lowerCamelCase )[0] # compare the actual values for a slice. A_ : Union[str, Any] = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
344
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) def UpperCAmelCase ( a_ , a_ ) -> Optional[int]: """simple docstring""" try: with open(a_ , """rb""" ) as flax_state_f: A_ : Tuple = from_bytes(a_ , flax_state_f.read() ) except UnpicklingError as e: try: with open(a_ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(a_ , a_ ) def UpperCAmelCase ( a_ , a_ ) -> Any: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda a_ : x.dtype == jnp.bfloataa , a_ ) ).values() if any(a_ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) A_ : str = jax.tree_util.tree_map( lambda a_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ ) A_ : Any = """""" A_ : Optional[int] = flatten_dict(a_ , sep=""".""" ) A_ : List[str] = pt_model.state_dict() # keep track of unexpected & missing keys A_ : Union[str, Any] = [] A_ : Dict = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A_ : List[Any] = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""] A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": A_ : int = flax_key_tuple_array[:-1] + ["""weight"""] A_ : Optional[int] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(a_ ): A_ : Tuple = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) A_ : Dict = """.""".join(a_ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." ) else: # add weight to pytorch dict A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor A_ : Tuple = torch.from_numpy(a_ ) # remove from missing keys missing_keys.remove(a_ ) else: # weight is not expected by PyTorch model unexpected_keys.append(a_ ) pt_model.load_state_dict(a_ ) # re-transform missing_keys to list A_ : Dict = list(a_ ) if len(a_ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(a_ ) > 0: logger.warning( F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" """ use it for predictions and inference.""" ) return pt_model
344
1
'''simple docstring''' def UpperCAmelCase ( a_ , a_ , a_ ) -> int: """simple docstring""" if exponent == 1: return base if exponent % 2 == 0: A_ : List[str] = _modexpt(a_ , exponent // 2 , a_ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(a_ , exponent - 1 , a_ )) % modulo_value def UpperCAmelCase ( a_ = 1_7_7_7 , a_ = 1_8_5_5 , a_ = 8 ) -> int: """simple docstring""" A_ : int = base for _ in range(1 , a_ ): A_ : Optional[Any] = _modexpt(a_ , a_ , 1_0**digits ) return result if __name__ == "__main__": print(f'{solution() = }')
344
'''simple docstring''' from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]: A_ : Any = parent A_ : List[Any] = batch_size A_ : List[Any] = image_size A_ : Optional[int] = num_channels A_ : Tuple = embeddings_size A_ : str = hidden_sizes A_ : Optional[Any] = depths A_ : Any = is_training A_ : int = use_labels A_ : int = hidden_act A_ : Optional[Any] = num_labels A_ : str = scope A_ : Optional[int] = len(_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Dict = None if self.use_labels: A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) A_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ) -> Optional[Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: A_ : Dict = TFRegNetModel(config=_lowerCamelCase ) A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: A_ : Optional[Any] = self.num_labels A_ : int = TFRegNetForImageClassification(_lowerCamelCase ) A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self ) -> str: A_ : Any = self.prepare_config_and_inputs() A_ , A_ , A_ : str = config_and_inputs A_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class _lowerCAmelCase ( __A, __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Dict = TFRegNetModelTester(self ) A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> str: return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> Dict: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def UpperCAmelCase_ ( self ) -> int: super().test_keras_fit() @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def UpperCAmelCase_ ( self ) -> Optional[Any]: pass def UpperCAmelCase_ ( self ) -> int: A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(_lowerCamelCase ) A_ : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): A_ : Optional[int] = model_class(_lowerCamelCase ) A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase ) A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[str] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Dict = layer_type A_ : List[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : str = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Dict: A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ): A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple() def recursive_check(_lowerCamelCase , _lowerCamelCase ): if isinstance(_lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ): recursive_check(_lowerCamelCase , _lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=( """Tuple and dict output are not equal. Difference:""" F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}" ) , ) recursive_check(_lowerCamelCase , _lowerCamelCase ) for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(_lowerCamelCase ) A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} ) A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} ) def UpperCAmelCase_ ( self ) -> str: A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def UpperCAmelCase_ ( self ) -> Tuple: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase ( ) -> Tuple: """simple docstring""" A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase_ ( self ) -> int: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A_ : Tuple = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" ) # forward pass A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase ) # verify the logits A_ : Optional[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
344
1
'''simple docstring''' from math import factorial def UpperCAmelCase ( a_ = 1_0_0 ) -> int: """simple docstring""" return sum(map(a_ , str(factorial(a_ ) ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
344
'''simple docstring''' def UpperCAmelCase ( a_ = 1_0_0 ) -> int: """simple docstring""" A_ : Dict = n * (n + 1) * (2 * n + 1) / 6 A_ : Optional[int] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(f'{solution() = }')
344
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = KandinskyVaaControlnetImgaImgPipeline lowerCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] lowerCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] lowerCamelCase = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCamelCase = False @property def UpperCAmelCase_ ( self ) -> Optional[int]: return 32 @property def UpperCAmelCase_ ( self ) -> List[str]: return 32 @property def UpperCAmelCase_ ( self ) -> int: return self.time_input_dim @property def UpperCAmelCase_ ( self ) -> Dict: return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ) -> Tuple: return 100 @property def UpperCAmelCase_ ( self ) -> List[Any]: torch.manual_seed(0 ) A_ : Optional[Any] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } A_ : Dict = UNetaDConditionModel(**_lowerCamelCase ) return model @property def UpperCAmelCase_ ( self ) -> List[Any]: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self ) -> List[Any]: torch.manual_seed(0 ) A_ : List[Any] = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self ) -> Dict: A_ : str = self.dummy_unet A_ : Optional[Any] = self.dummy_movq A_ : List[Any] = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } A_ : Any = DDIMScheduler(**_lowerCamelCase ) A_ : str = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=0 ) -> Tuple: A_ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) A_ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCamelCase ) # create init_image A_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) A_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : List[Any] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert("""RGB""" ).resize((256, 256) ) # create hint A_ : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith("""mps""" ): A_ : List[str] = torch.manual_seed(_lowerCamelCase ) else: A_ : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) A_ : str = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Any = """cpu""" A_ : str = self.get_dummy_components() A_ : Dict = self.pipeline_class(**_lowerCamelCase ) A_ : Tuple = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) A_ : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) A_ : Optional[Any] = output.images A_ : Optional[int] = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] A_ : Optional[int] = image[0, -3:, -3:, -1] A_ : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Any = np.array( [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> Optional[int]: A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" ) A_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) A_ : Optional[int] = init_image.resize((512, 512) ) A_ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) A_ : List[str] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 255.0 A_ : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) A_ : int = """A robot, 4k photo""" A_ : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) A_ : str = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) A_ : Optional[Any] = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) A_ : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) A_ , A_ : Optional[int] = pipe_prior( _lowerCamelCase , image=_lowerCamelCase , strength=0.85 , generator=_lowerCamelCase , negative_prompt="""""" , ).to_tuple() A_ : Dict = pipeline( image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , ) A_ : Tuple = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
344
'''simple docstring''' from typing import TYPE_CHECKING from ..utils import _LazyModule UpperCamelCase__ : int = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
1
'''simple docstring''' import itertools import string from collections.abc import Generator, Iterable def UpperCAmelCase ( a_ , a_ ) -> Generator[tuple[str, ...], None, None]: """simple docstring""" A_ : Optional[Any] = iter(a_ ) while True: A_ : List[str] = tuple(itertools.islice(a_ , a_ ) ) if not chunk: return yield chunk def UpperCAmelCase ( a_ ) -> str: """simple docstring""" A_ : List[Any] = """""".join([c.upper() for c in dirty if c in string.ascii_letters] ) A_ : Tuple = """""" if len(a_ ) < 2: return dirty for i in range(len(a_ ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(a_ ) & 1: clean += "X" return clean def UpperCAmelCase ( a_ ) -> list[str]: """simple docstring""" A_ : Any = """ABCDEFGHIKLMNOPQRSTUVWXYZ""" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler A_ : Optional[Any] = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(a_ ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(a_ ) return table def UpperCAmelCase ( a_ , a_ ) -> str: """simple docstring""" A_ : int = generate_table(a_ ) A_ : List[str] = prepare_input(a_ ) A_ : Tuple = """""" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(a_ , 2 ): A_ , A_ : Optional[int] = divmod(table.index(a_ ) , 5 ) A_ , A_ : str = divmod(table.index(a_ ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def UpperCAmelCase ( a_ , a_ ) -> str: """simple docstring""" A_ : int = generate_table(a_ ) A_ : Union[str, Any] = """""" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(a_ , 2 ): A_ , A_ : Union[str, Any] = divmod(table.index(a_ ) , 5 ) A_ , A_ : List[Any] = divmod(table.index(a_ ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
344
'''simple docstring''' from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def UpperCAmelCase ( a_ ) -> Dict[str, torch.Tensor]: """simple docstring""" A_ : List[str] = [] A_ : Dict = [] A_ : List[Any] = [] for rt in rc.restypes: A_ : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) A_ : Union[str, Any] = {name: i for i, name in enumerate(a_ )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 1_4 ) restype_atomaa_to_atomaa_list.append([0] * 3_7 ) restype_atomaa_mask_list.append([0.0] * 1_4 ) A_ : Tuple = torch.tensor( a_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) A_ : Optional[int] = torch.tensor( a_ , dtype=torch.intaa , device=protein["""aatype"""].device , ) A_ : List[Any] = torch.tensor( a_ , dtype=torch.floataa , device=protein["""aatype"""].device , ) A_ : Optional[int] = protein["""aatype"""].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein A_ : Dict = restype_atomaa_to_atomaa[protein_aatype] A_ : Optional[Any] = restype_atomaa_mask[protein_aatype] A_ : Any = residx_atomaa_mask A_ : List[str] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back A_ : Tuple = restype_atomaa_to_atomaa[protein_aatype] A_ : Tuple = residx_atomaa_to_atomaa.long() # create the corresponding mask A_ : Optional[Any] = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["""aatype"""].device ) for restype, restype_letter in enumerate(rc.restypes ): A_ : Optional[Any] = rc.restype_atoa[restype_letter] A_ : Any = rc.residue_atoms[restype_name] for atom_name in atom_names: A_ : Any = rc.atom_order[atom_name] A_ : Optional[int] = 1 A_ : Optional[int] = restype_atomaa_mask[protein_aatype] A_ : Dict = residx_atomaa_mask return protein def UpperCAmelCase ( a_ ) -> Dict[str, np.ndarray]: """simple docstring""" A_ : Union[str, Any] = tree_map(lambda a_ : torch.tensor(a_ , device=batch["""aatype"""].device ) , a_ , np.ndarray ) A_ : Optional[int] = tensor_tree_map(lambda a_ : np.array(a_ ) , make_atomaa_masks(a_ ) ) return out
344
1
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( __A ): """simple docstring""" lowerCamelCase = (PNDMScheduler,) lowerCamelCase = (('''num_inference_steps''', 50),) def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> Dict: A_ : Any = { """num_train_timesteps""": 1000, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_lowerCamelCase ) return config def UpperCAmelCase_ ( self , _lowerCamelCase=0 , **_lowerCamelCase ) -> str: A_ : Dict = dict(self.forward_default_kwargs ) A_ : Any = kwargs.pop("""num_inference_steps""" , _lowerCamelCase ) A_ : Optional[int] = self.dummy_sample A_ : List[Any] = 0.1 * sample A_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A_ : str = self.get_scheduler_config(**_lowerCamelCase ) A_ : List[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals A_ : int = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCamelCase ) A_ : List[Any] = scheduler_class.from_pretrained(_lowerCamelCase ) new_scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals A_ : Optional[int] = dummy_past_residuals[:] A_ : Union[str, Any] = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : Tuple = new_scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A_ : Tuple = scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[str] = new_scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass def UpperCAmelCase_ ( self , _lowerCamelCase=0 , **_lowerCamelCase ) -> List[Any]: A_ : Optional[Any] = dict(self.forward_default_kwargs ) A_ : Optional[int] = kwargs.pop("""num_inference_steps""" , _lowerCamelCase ) A_ : str = self.dummy_sample A_ : Tuple = 0.1 * sample A_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A_ : int = self.get_scheduler_config() A_ : List[str] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A_ : List[str] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCamelCase ) A_ : int = scheduler_class.from_pretrained(_lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A_ : List[str] = dummy_past_residuals[:] A_ : Union[str, Any] = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : Tuple = new_scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" A_ : List[Any] = scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[Any] = new_scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> int: A_ : Union[str, Any] = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config(**_lowerCamelCase ) A_ : Dict = scheduler_class(**_lowerCamelCase ) A_ : str = 10 A_ : Any = self.dummy_model() A_ : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(_lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A_ : List[str] = model(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A_ : Dict = model(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample return sample def UpperCAmelCase_ ( self ) -> Tuple: A_ : Dict = dict(self.forward_default_kwargs ) A_ : Any = kwargs.pop("""num_inference_steps""" , _lowerCamelCase ) for scheduler_class in self.scheduler_classes: A_ : Tuple = self.get_scheduler_config() A_ : Any = scheduler_class(**_lowerCamelCase ) A_ : Any = self.dummy_sample A_ : int = 0.1 * sample if num_inference_steps is not None and hasattr(_lowerCamelCase , """set_timesteps""" ): scheduler.set_timesteps(_lowerCamelCase ) elif num_inference_steps is not None and not hasattr(_lowerCamelCase , """set_timesteps""" ): A_ : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A_ : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A_ : List[Any] = dummy_past_residuals[:] A_ : Tuple = scheduler.step_prk(_lowerCamelCase , 0 , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : Optional[int] = scheduler.step_prk(_lowerCamelCase , 1 , _lowerCamelCase , **_lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A_ : Any = scheduler.step_plms(_lowerCamelCase , 0 , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[str] = scheduler.step_plms(_lowerCamelCase , 1 , _lowerCamelCase , **_lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowerCamelCase ) A_ : int = self.scheduler_classes[0] A_ : List[Any] = self.get_scheduler_config(steps_offset=1 ) A_ : List[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Any: for t in [1, 5, 10]: self.check_over_forward(time_step=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> int: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 A_ : List[Any] = 27 for scheduler_class in self.scheduler_classes: A_ : Any = self.dummy_sample A_ : Tuple = 0.1 * sample A_ : int = self.get_scheduler_config() A_ : List[str] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A_ : str = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample def UpperCAmelCase_ ( self ) -> Any: with self.assertRaises(_lowerCamelCase ): A_ : Optional[int] = self.scheduler_classes[0] A_ : Optional[int] = self.get_scheduler_config() A_ : Optional[int] = scheduler_class(**_lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Union[str, Any] = self.full_loop() A_ : int = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : Union[str, Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Union[str, Any] = self.full_loop(prediction_type="""v_prediction""" ) A_ : List[str] = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : Optional[int] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def UpperCAmelCase_ ( self ) -> List[str]: # We specify different beta, so that the first alpha is 0.99 A_ : List[Any] = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 ) A_ : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def UpperCAmelCase_ ( self ) -> Any: # We specify different beta, so that the first alpha is 0.99 A_ : str = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 ) A_ : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
344
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str: A_ : Optional[int] = parent A_ : Dict = batch_size A_ : List[Any] = image_size A_ : Optional[int] = patch_size A_ : List[str] = num_channels A_ : List[Any] = is_training A_ : Union[str, Any] = use_labels A_ : Union[str, Any] = hidden_size A_ : str = num_hidden_layers A_ : List[str] = num_attention_heads A_ : Union[str, Any] = intermediate_size A_ : Any = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : Dict = type_sequence_label_size A_ : Optional[int] = initializer_range A_ : str = scope A_ : Optional[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A_ : Tuple = (image_size // patch_size) ** 2 A_ : Union[str, Any] = num_patches + 2 def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Dict = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self ) -> int: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: A_ : List[str] = DeiTModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Dict = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A_ : Dict = 1 A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : int = model(_lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: A_ : Tuple = self.type_sequence_label_size A_ : Tuple = DeiTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : Dict = 1 A_ : Any = DeiTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : List[Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = config_and_inputs A_ : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __A, __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowerCamelCase = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : int = DeiTModelTester(self ) A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> Optional[int]: pass def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A_ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[str] = model_class(_lowerCamelCase ) A_ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Union[str, Any] = [*signature.parameters.keys()] A_ : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]: A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCAmelCase_ ( self ) -> Optional[Any]: if not self.model_tester.is_training: return A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Optional[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_lowerCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue A_ : List[str] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : List[str] = model(**_lowerCamelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> int: A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A_ : Any = False A_ : Union[str, Any] = True for model_class in self.all_model_classes: if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue A_ : List[Any] = model_class(_lowerCamelCase ) model.gradient_checkpointing_enable() model.to(_lowerCamelCase ) model.train() A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) A_ : Union[str, Any] = model(**_lowerCamelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> Tuple: A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Optional[Any] = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_lowerCamelCase ), *get_values(_lowerCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ): A_ : Dict = problem_type["""title"""] A_ : List[Any] = problem_type["""num_labels"""] A_ : List[str] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) if problem_type["num_labels"] > 1: A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list: A_ : List[str] = model(**_lowerCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def UpperCAmelCase_ ( self ) -> Tuple: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : int = DeiTModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def UpperCAmelCase ( ) -> Tuple: """simple docstring""" A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase_ ( self ) -> Optional[Any]: return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self ) -> Tuple: A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( _lowerCamelCase ) A_ : Optional[int] = self.default_image_processor A_ : str = prepare_img() A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Any = model(**_lowerCamelCase ) # verify the logits A_ : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCAmelCase_ ( self ) -> Tuple: A_ : Optional[Any] = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" ) A_ : int = self.default_image_processor A_ : List[str] = prepare_img() A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ) A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A_ : List[Any] = model(_lowerCamelCase )
344
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase__ : Any = { 'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'], 'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = ['BertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ 'BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BertForMaskedLM', 'BertForMultipleChoice', 'BertForNextSentencePrediction', 'BertForPreTraining', 'BertForQuestionAnswering', 'BertForSequenceClassification', 'BertForTokenClassification', 'BertLayer', 'BertLMHeadModel', 'BertModel', 'BertPreTrainedModel', 'load_tf_weights_in_bert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ 'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBertEmbeddings', 'TFBertForMaskedLM', 'TFBertForMultipleChoice', 'TFBertForNextSentencePrediction', 'TFBertForPreTraining', 'TFBertForQuestionAnswering', 'TFBertForSequenceClassification', 'TFBertForTokenClassification', 'TFBertLMHeadModel', 'TFBertMainLayer', 'TFBertModel', 'TFBertPreTrainedModel', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Any = ['TFBertTokenizer'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = [ 'FlaxBertForCausalLM', 'FlaxBertForMaskedLM', 'FlaxBertForMultipleChoice', 'FlaxBertForNextSentencePrediction', 'FlaxBertForPreTraining', 'FlaxBertForQuestionAnswering', 'FlaxBertForSequenceClassification', 'FlaxBertForTokenClassification', 'FlaxBertModel', 'FlaxBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys UpperCamelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 32 , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCamelCase = True , _lowerCamelCase=7 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=3 , ) -> Union[str, Any]: A_ : Optional[int] = parent A_ : Union[str, Any] = do_resize A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 288} A_ : Tuple = size_divisor A_ : List[Any] = do_rescale A_ : Dict = rescale_factor A_ : List[Any] = do_normalize A_ : Dict = do_center_crop A_ : Optional[Any] = image_mean A_ : List[str] = image_std A_ : str = do_pad A_ : Any = batch_size A_ : List[str] = num_channels A_ : List[str] = min_resolution A_ : Union[str, Any] = max_resolution def UpperCAmelCase_ ( self ) -> Any: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]: if not batched: A_ : Union[str, Any] = self.size["""shortest_edge"""] A_ : Dict = image_inputs[0] if isinstance(_lowerCamelCase , Image.Image ): A_ , A_ : Optional[Any] = image.size else: A_ , A_ : int = image.shape[1], image.shape[2] A_ : Optional[int] = size / min(_lowerCamelCase , _lowerCamelCase ) if h < w: A_ , A_ : Optional[Any] = size, scale * w else: A_ , A_ : Dict = scale * h, size A_ : Union[str, Any] = int((1333 / 800) * size ) if max(_lowerCamelCase , _lowerCamelCase ) > max_size: A_ : str = max_size / max(_lowerCamelCase , _lowerCamelCase ) A_ : Dict = newh * scale A_ : Dict = neww * scale A_ , A_ : str = int(newh + 0.5 ), int(neww + 0.5 ) A_ , A_ : Dict = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: A_ : Tuple = [] for image in image_inputs: A_ , A_ : Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A_ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0] A_ : Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowerCAmelCase ( __A, unittest.TestCase ): """simple docstring""" lowerCamelCase = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ) -> Dict: A_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass def UpperCAmelCase_ ( self ) -> List[str]: # Initialize image processor A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: # Initialize image processor A_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self ) -> Tuple: # Initialize image processor A_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
344
1
'''simple docstring''' UpperCamelCase__ : Union[str, Any] = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
344
'''simple docstring''' def UpperCAmelCase ( a_ , a_ ) -> Optional[int]: """simple docstring""" print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" ) for i in range(a_ ): for j in range(a_ ): if dist[i][j] != float("""inf""" ): print(int(dist[i][j] ) , end="""\t""" ) else: print("""INF""" , end="""\t""" ) print() def UpperCAmelCase ( a_ , a_ ) -> Tuple: """simple docstring""" A_ : List[str] = [[float("""inf""" ) for _ in range(a_ )] for _ in range(a_ )] for i in range(a_ ): for j in range(a_ ): A_ : List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(a_ ): # looping through rows of graph array for i in range(a_ ): # looping through columns of graph array for j in range(a_ ): if ( dist[i][k] != float("""inf""" ) and dist[k][j] != float("""inf""" ) and dist[i][k] + dist[k][j] < dist[i][j] ): A_ : List[str] = dist[i][k] + dist[k][j] _print_dist(a_ , a_ ) return dist, v if __name__ == "__main__": UpperCamelCase__ : Tuple = int(input('Enter number of vertices: ')) UpperCamelCase__ : int = int(input('Enter number of edges: ')) UpperCamelCase__ : Dict = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): UpperCamelCase__ : Union[str, Any] = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) UpperCamelCase__ : Union[str, Any] = int(input('Enter source:')) UpperCamelCase__ : int = int(input('Enter destination:')) UpperCamelCase__ : Optional[Any] = float(input('Enter weight:')) UpperCamelCase__ : Any = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
344
1
'''simple docstring''' from math import factorial def UpperCAmelCase ( a_ = 1_0_0 ) -> int: """simple docstring""" return sum(int(a_ ) for x in str(factorial(a_ ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
344
'''simple docstring''' import datasets from .evaluate import evaluate UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n' UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n' UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def UpperCAmelCase_ ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )}, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} A_ : List[Any] = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase ) return score
344
1